CGBuiltin.cpp 298 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
767786779678067816782678367846785678667876788678967906791679267936794679567966797679867996800680168026803680468056806680768086809681068116812681368146815681668176818681968206821682268236824682568266827682868296830683168326833683468356836683768386839684068416842684368446845684668476848684968506851685268536854685568566857685868596860686168626863686468656866686768686869687068716872687368746875687668776878687968806881688268836884688568866887688868896890689168926893689468956896689768986899690069016902690369046905690669076908690969106911691269136914691569166917691869196920692169226923692469256926692769286929693069316932693369346935693669376938693969406941694269436944694569466947694869496950695169526953695469556956695769586959696069616962696369646965696669676968696969706971697269736974697569766977697869796980698169826983698469856986698769886989699069916992699369946995699669976998699970007001700270037004700570067007700870097010701170127013701470157016701770187019702070217022702370247025702670277028702970307031703270337034703570367037703870397040704170427043704470457046704770487049705070517052705370547055705670577058705970607061706270637064706570667067706870697070707170727073707470757076707770787079708070817082708370847085708670877088708970907091709270937094709570967097709870997100710171027103710471057106710771087109711071117112711371147115711671177118711971207121712271237124712571267127712871297130713171327133713471357136713771387139714071417142714371447145714671477148714971507151715271537154715571567157715871597160716171627163
  1. //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This contains code to emit Builtin calls as LLVM code.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "CodeGenFunction.h"
  14. #include "CGCXXABI.h"
  15. #include "CGObjCRuntime.h"
  16. #include "CodeGenModule.h"
  17. #include "TargetInfo.h"
  18. #include "clang/AST/ASTContext.h"
  19. #include "clang/AST/Decl.h"
  20. #include "clang/Basic/TargetBuiltins.h"
  21. #include "clang/Basic/TargetInfo.h"
  22. #include "clang/CodeGen/CGFunctionInfo.h"
  23. #include "llvm/ADT/StringExtras.h"
  24. #include "llvm/IR/CallSite.h"
  25. #include "llvm/IR/DataLayout.h"
  26. #include "llvm/IR/InlineAsm.h"
  27. #include "llvm/IR/Intrinsics.h"
  28. #include <sstream>
  29. using namespace clang;
  30. using namespace CodeGen;
  31. using namespace llvm;
  32. /// getBuiltinLibFunction - Given a builtin id for a function like
  33. /// "__builtin_fabsf", return a Function* for "fabsf".
  34. llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
  35. unsigned BuiltinID) {
  36. assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
  37. // Get the name, skip over the __builtin_ prefix (if necessary).
  38. StringRef Name;
  39. GlobalDecl D(FD);
  40. // If the builtin has been declared explicitly with an assembler label,
  41. // use the mangled name. This differs from the plain label on platforms
  42. // that prefix labels.
  43. if (FD->hasAttr<AsmLabelAttr>())
  44. Name = getMangledName(D);
  45. else
  46. Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
  47. llvm::FunctionType *Ty =
  48. cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
  49. return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
  50. }
  51. /// Emit the conversions required to turn the given value into an
  52. /// integer of the given size.
  53. static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
  54. QualType T, llvm::IntegerType *IntType) {
  55. V = CGF.EmitToMemory(V, T);
  56. if (V->getType()->isPointerTy())
  57. return CGF.Builder.CreatePtrToInt(V, IntType);
  58. assert(V->getType() == IntType);
  59. return V;
  60. }
  61. static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
  62. QualType T, llvm::Type *ResultType) {
  63. V = CGF.EmitFromMemory(V, T);
  64. if (ResultType->isPointerTy())
  65. return CGF.Builder.CreateIntToPtr(V, ResultType);
  66. assert(V->getType() == ResultType);
  67. return V;
  68. }
  69. /// Utility to insert an atomic instruction based on Instrinsic::ID
  70. /// and the expression node.
  71. static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
  72. llvm::AtomicRMWInst::BinOp Kind,
  73. const CallExpr *E) {
  74. QualType T = E->getType();
  75. assert(E->getArg(0)->getType()->isPointerType());
  76. assert(CGF.getContext().hasSameUnqualifiedType(T,
  77. E->getArg(0)->getType()->getPointeeType()));
  78. assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
  79. llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  80. unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
  81. llvm::IntegerType *IntType =
  82. llvm::IntegerType::get(CGF.getLLVMContext(),
  83. CGF.getContext().getTypeSize(T));
  84. llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
  85. llvm::Value *Args[2];
  86. Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  87. Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  88. llvm::Type *ValueType = Args[1]->getType();
  89. Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  90. llvm::Value *Result =
  91. CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
  92. llvm::SequentiallyConsistent);
  93. return EmitFromInt(CGF, Result, T, ValueType);
  94. }
  95. static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
  96. llvm::AtomicRMWInst::BinOp Kind,
  97. const CallExpr *E) {
  98. return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
  99. }
  100. /// Utility to insert an atomic instruction based Instrinsic::ID and
  101. /// the expression node, where the return value is the result of the
  102. /// operation.
  103. static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
  104. llvm::AtomicRMWInst::BinOp Kind,
  105. const CallExpr *E,
  106. Instruction::BinaryOps Op,
  107. bool Invert = false) {
  108. QualType T = E->getType();
  109. assert(E->getArg(0)->getType()->isPointerType());
  110. assert(CGF.getContext().hasSameUnqualifiedType(T,
  111. E->getArg(0)->getType()->getPointeeType()));
  112. assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
  113. llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  114. unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
  115. llvm::IntegerType *IntType =
  116. llvm::IntegerType::get(CGF.getLLVMContext(),
  117. CGF.getContext().getTypeSize(T));
  118. llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
  119. llvm::Value *Args[2];
  120. Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  121. llvm::Type *ValueType = Args[1]->getType();
  122. Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  123. Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  124. llvm::Value *Result =
  125. CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
  126. llvm::SequentiallyConsistent);
  127. Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  128. if (Invert)
  129. Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
  130. llvm::ConstantInt::get(IntType, -1));
  131. Result = EmitFromInt(CGF, Result, T, ValueType);
  132. return RValue::get(Result);
  133. }
  134. /// @brief Utility to insert an atomic cmpxchg instruction.
  135. ///
  136. /// @param CGF The current codegen function.
  137. /// @param E Builtin call expression to convert to cmpxchg.
  138. /// arg0 - address to operate on
  139. /// arg1 - value to compare with
  140. /// arg2 - new value
  141. /// @param ReturnBool Specifies whether to return success flag of
  142. /// cmpxchg result or the old value.
  143. ///
  144. /// @returns result of cmpxchg, according to ReturnBool
  145. static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
  146. bool ReturnBool) {
  147. QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  148. llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  149. unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
  150. llvm::IntegerType *IntType = llvm::IntegerType::get(
  151. CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
  152. llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
  153. Value *Args[3];
  154. Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  155. Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  156. llvm::Type *ValueType = Args[1]->getType();
  157. Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  158. Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
  159. Value *Pair = CGF.Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
  160. llvm::SequentiallyConsistent,
  161. llvm::SequentiallyConsistent);
  162. if (ReturnBool)
  163. // Extract boolean success flag and zext it to int.
  164. return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
  165. CGF.ConvertType(E->getType()));
  166. else
  167. // Extract old value and emit it using the same type as compare value.
  168. return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
  169. ValueType);
  170. }
  171. /// EmitFAbs - Emit a call to @llvm.fabs().
  172. static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  173. Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  174. llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  175. Call->setDoesNotAccessMemory();
  176. return Call;
  177. }
  178. /// Emit the computation of the sign bit for a floating point value. Returns
  179. /// the i1 sign bit value.
  180. static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  181. LLVMContext &C = CGF.CGM.getLLVMContext();
  182. llvm::Type *Ty = V->getType();
  183. int Width = Ty->getPrimitiveSizeInBits();
  184. llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  185. V = CGF.Builder.CreateBitCast(V, IntTy);
  186. if (Ty->isPPC_FP128Ty()) {
  187. // The higher-order double comes first, and so we need to truncate the
  188. // pair to extract the overall sign. The order of the pair is the same
  189. // in both little- and big-Endian modes.
  190. Width >>= 1;
  191. IntTy = llvm::IntegerType::get(C, Width);
  192. V = CGF.Builder.CreateTrunc(V, IntTy);
  193. }
  194. Value *Zero = llvm::Constant::getNullValue(IntTy);
  195. return CGF.Builder.CreateICmpSLT(V, Zero);
  196. }
  197. static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
  198. const CallExpr *E, llvm::Value *calleeValue) {
  199. return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E,
  200. ReturnValueSlot(), Fn);
  201. }
  202. /// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
  203. /// depending on IntrinsicID.
  204. ///
  205. /// \arg CGF The current codegen function.
  206. /// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
  207. /// \arg X The first argument to the llvm.*.with.overflow.*.
  208. /// \arg Y The second argument to the llvm.*.with.overflow.*.
  209. /// \arg Carry The carry returned by the llvm.*.with.overflow.*.
  210. /// \returns The result (i.e. sum/product) returned by the intrinsic.
  211. static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
  212. const llvm::Intrinsic::ID IntrinsicID,
  213. llvm::Value *X, llvm::Value *Y,
  214. llvm::Value *&Carry) {
  215. // Make sure we have integers of the same width.
  216. assert(X->getType() == Y->getType() &&
  217. "Arguments must be the same type. (Did you forget to make sure both "
  218. "arguments have the same integer width?)");
  219. llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  220. llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  221. Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  222. return CGF.Builder.CreateExtractValue(Tmp, 0);
  223. }
  224. RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
  225. unsigned BuiltinID, const CallExpr *E,
  226. ReturnValueSlot ReturnValue) {
  227. // See if we can constant fold this builtin. If so, don't emit it at all.
  228. Expr::EvalResult Result;
  229. if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
  230. !Result.hasSideEffects()) {
  231. if (Result.Val.isInt())
  232. return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
  233. Result.Val.getInt()));
  234. if (Result.Val.isFloat())
  235. return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
  236. Result.Val.getFloat()));
  237. }
  238. switch (BuiltinID) {
  239. default: break; // Handle intrinsics and libm functions below.
  240. case Builtin::BI__builtin___CFStringMakeConstantString:
  241. case Builtin::BI__builtin___NSStringMakeConstantString:
  242. return RValue::get(CGM.EmitConstantExpr(E, E->getType(), nullptr));
  243. case Builtin::BI__builtin_stdarg_start:
  244. case Builtin::BI__builtin_va_start:
  245. case Builtin::BI__va_start:
  246. case Builtin::BI__builtin_va_end: {
  247. Value *ArgValue = (BuiltinID == Builtin::BI__va_start)
  248. ? EmitScalarExpr(E->getArg(0))
  249. : EmitVAListRef(E->getArg(0));
  250. llvm::Type *DestType = Int8PtrTy;
  251. if (ArgValue->getType() != DestType)
  252. ArgValue = Builder.CreateBitCast(ArgValue, DestType,
  253. ArgValue->getName().data());
  254. Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
  255. Intrinsic::vaend : Intrinsic::vastart;
  256. return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  257. }
  258. case Builtin::BI__builtin_va_copy: {
  259. Value *DstPtr = EmitVAListRef(E->getArg(0));
  260. Value *SrcPtr = EmitVAListRef(E->getArg(1));
  261. llvm::Type *Type = Int8PtrTy;
  262. DstPtr = Builder.CreateBitCast(DstPtr, Type);
  263. SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
  264. return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
  265. {DstPtr, SrcPtr}));
  266. }
  267. case Builtin::BI__builtin_abs:
  268. case Builtin::BI__builtin_labs:
  269. case Builtin::BI__builtin_llabs: {
  270. Value *ArgValue = EmitScalarExpr(E->getArg(0));
  271. Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
  272. Value *CmpResult =
  273. Builder.CreateICmpSGE(ArgValue,
  274. llvm::Constant::getNullValue(ArgValue->getType()),
  275. "abscond");
  276. Value *Result =
  277. Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
  278. return RValue::get(Result);
  279. }
  280. case Builtin::BI__builtin_fabs:
  281. case Builtin::BI__builtin_fabsf:
  282. case Builtin::BI__builtin_fabsl: {
  283. Value *Arg1 = EmitScalarExpr(E->getArg(0));
  284. Value *Result = EmitFAbs(*this, Arg1);
  285. return RValue::get(Result);
  286. }
  287. case Builtin::BI__builtin_fmod:
  288. case Builtin::BI__builtin_fmodf:
  289. case Builtin::BI__builtin_fmodl: {
  290. Value *Arg1 = EmitScalarExpr(E->getArg(0));
  291. Value *Arg2 = EmitScalarExpr(E->getArg(1));
  292. Value *Result = Builder.CreateFRem(Arg1, Arg2, "fmod");
  293. return RValue::get(Result);
  294. }
  295. case Builtin::BI__builtin_conj:
  296. case Builtin::BI__builtin_conjf:
  297. case Builtin::BI__builtin_conjl: {
  298. ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
  299. Value *Real = ComplexVal.first;
  300. Value *Imag = ComplexVal.second;
  301. Value *Zero =
  302. Imag->getType()->isFPOrFPVectorTy()
  303. ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
  304. : llvm::Constant::getNullValue(Imag->getType());
  305. Imag = Builder.CreateFSub(Zero, Imag, "sub");
  306. return RValue::getComplex(std::make_pair(Real, Imag));
  307. }
  308. case Builtin::BI__builtin_creal:
  309. case Builtin::BI__builtin_crealf:
  310. case Builtin::BI__builtin_creall:
  311. case Builtin::BIcreal:
  312. case Builtin::BIcrealf:
  313. case Builtin::BIcreall: {
  314. ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
  315. return RValue::get(ComplexVal.first);
  316. }
  317. case Builtin::BI__builtin_cimag:
  318. case Builtin::BI__builtin_cimagf:
  319. case Builtin::BI__builtin_cimagl:
  320. case Builtin::BIcimag:
  321. case Builtin::BIcimagf:
  322. case Builtin::BIcimagl: {
  323. ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
  324. return RValue::get(ComplexVal.second);
  325. }
  326. case Builtin::BI__builtin_ctzs:
  327. case Builtin::BI__builtin_ctz:
  328. case Builtin::BI__builtin_ctzl:
  329. case Builtin::BI__builtin_ctzll: {
  330. Value *ArgValue = EmitScalarExpr(E->getArg(0));
  331. llvm::Type *ArgType = ArgValue->getType();
  332. Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
  333. llvm::Type *ResultType = ConvertType(E->getType());
  334. Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
  335. Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
  336. if (Result->getType() != ResultType)
  337. Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
  338. "cast");
  339. return RValue::get(Result);
  340. }
  341. case Builtin::BI__builtin_clzs:
  342. case Builtin::BI__builtin_clz:
  343. case Builtin::BI__builtin_clzl:
  344. case Builtin::BI__builtin_clzll: {
  345. Value *ArgValue = EmitScalarExpr(E->getArg(0));
  346. llvm::Type *ArgType = ArgValue->getType();
  347. Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
  348. llvm::Type *ResultType = ConvertType(E->getType());
  349. Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
  350. Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
  351. if (Result->getType() != ResultType)
  352. Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
  353. "cast");
  354. return RValue::get(Result);
  355. }
  356. case Builtin::BI__builtin_ffs:
  357. case Builtin::BI__builtin_ffsl:
  358. case Builtin::BI__builtin_ffsll: {
  359. // ffs(x) -> x ? cttz(x) + 1 : 0
  360. Value *ArgValue = EmitScalarExpr(E->getArg(0));
  361. llvm::Type *ArgType = ArgValue->getType();
  362. Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
  363. llvm::Type *ResultType = ConvertType(E->getType());
  364. Value *Tmp =
  365. Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
  366. llvm::ConstantInt::get(ArgType, 1));
  367. Value *Zero = llvm::Constant::getNullValue(ArgType);
  368. Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
  369. Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
  370. if (Result->getType() != ResultType)
  371. Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
  372. "cast");
  373. return RValue::get(Result);
  374. }
  375. case Builtin::BI__builtin_parity:
  376. case Builtin::BI__builtin_parityl:
  377. case Builtin::BI__builtin_parityll: {
  378. // parity(x) -> ctpop(x) & 1
  379. Value *ArgValue = EmitScalarExpr(E->getArg(0));
  380. llvm::Type *ArgType = ArgValue->getType();
  381. Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
  382. llvm::Type *ResultType = ConvertType(E->getType());
  383. Value *Tmp = Builder.CreateCall(F, ArgValue);
  384. Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
  385. if (Result->getType() != ResultType)
  386. Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
  387. "cast");
  388. return RValue::get(Result);
  389. }
  390. case Builtin::BI__builtin_popcount:
  391. case Builtin::BI__builtin_popcountl:
  392. case Builtin::BI__builtin_popcountll: {
  393. Value *ArgValue = EmitScalarExpr(E->getArg(0));
  394. llvm::Type *ArgType = ArgValue->getType();
  395. Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
  396. llvm::Type *ResultType = ConvertType(E->getType());
  397. Value *Result = Builder.CreateCall(F, ArgValue);
  398. if (Result->getType() != ResultType)
  399. Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
  400. "cast");
  401. return RValue::get(Result);
  402. }
  403. case Builtin::BI__builtin_expect: {
  404. Value *ArgValue = EmitScalarExpr(E->getArg(0));
  405. llvm::Type *ArgType = ArgValue->getType();
  406. Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
  407. // Don't generate llvm.expect on -O0 as the backend won't use it for
  408. // anything.
  409. // Note, we still IRGen ExpectedValue because it could have side-effects.
  410. if (CGM.getCodeGenOpts().OptimizationLevel == 0)
  411. return RValue::get(ArgValue);
  412. Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
  413. Value *Result =
  414. Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
  415. return RValue::get(Result);
  416. }
  417. case Builtin::BI__builtin_assume_aligned: {
  418. Value *PtrValue = EmitScalarExpr(E->getArg(0));
  419. Value *OffsetValue =
  420. (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
  421. Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
  422. ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
  423. unsigned Alignment = (unsigned) AlignmentCI->getZExtValue();
  424. EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue);
  425. return RValue::get(PtrValue);
  426. }
  427. case Builtin::BI__assume:
  428. case Builtin::BI__builtin_assume: {
  429. if (E->getArg(0)->HasSideEffects(getContext()))
  430. return RValue::get(nullptr);
  431. Value *ArgValue = EmitScalarExpr(E->getArg(0));
  432. Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
  433. return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
  434. }
  435. case Builtin::BI__builtin_bswap16:
  436. case Builtin::BI__builtin_bswap32:
  437. case Builtin::BI__builtin_bswap64: {
  438. Value *ArgValue = EmitScalarExpr(E->getArg(0));
  439. llvm::Type *ArgType = ArgValue->getType();
  440. Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
  441. return RValue::get(Builder.CreateCall(F, ArgValue));
  442. }
  443. case Builtin::BI__builtin_object_size: {
  444. // We rely on constant folding to deal with expressions with side effects.
  445. assert(!E->getArg(0)->HasSideEffects(getContext()) &&
  446. "should have been constant folded");
  447. // We pass this builtin onto the optimizer so that it can
  448. // figure out the object size in more complex cases.
  449. llvm::Type *ResType = ConvertType(E->getType());
  450. // LLVM only supports 0 and 2, make sure that we pass along that
  451. // as a boolean.
  452. Value *Ty = EmitScalarExpr(E->getArg(1));
  453. ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
  454. assert(CI);
  455. uint64_t val = CI->getZExtValue();
  456. CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
  457. // FIXME: Get right address space.
  458. llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
  459. Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
  460. return RValue::get(
  461. Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0)), CI}));
  462. }
  463. case Builtin::BI__builtin_prefetch: {
  464. Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
  465. // FIXME: Technically these constants should of type 'int', yes?
  466. RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
  467. llvm::ConstantInt::get(Int32Ty, 0);
  468. Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
  469. llvm::ConstantInt::get(Int32Ty, 3);
  470. Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
  471. Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
  472. return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
  473. }
  474. case Builtin::BI__builtin_readcyclecounter: {
  475. Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
  476. return RValue::get(Builder.CreateCall(F));
  477. }
  478. case Builtin::BI__builtin___clear_cache: {
  479. Value *Begin = EmitScalarExpr(E->getArg(0));
  480. Value *End = EmitScalarExpr(E->getArg(1));
  481. Value *F = CGM.getIntrinsic(Intrinsic::clear_cache);
  482. return RValue::get(Builder.CreateCall(F, {Begin, End}));
  483. }
  484. case Builtin::BI__builtin_trap:
  485. return RValue::get(EmitTrapCall(Intrinsic::trap));
  486. case Builtin::BI__debugbreak:
  487. return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
  488. case Builtin::BI__builtin_unreachable: {
  489. if (SanOpts.has(SanitizerKind::Unreachable)) {
  490. SanitizerScope SanScope(this);
  491. EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
  492. SanitizerKind::Unreachable),
  493. "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()),
  494. None);
  495. } else
  496. Builder.CreateUnreachable();
  497. // We do need to preserve an insertion point.
  498. EmitBlock(createBasicBlock("unreachable.cont"));
  499. return RValue::get(nullptr);
  500. }
  501. case Builtin::BI__builtin_powi:
  502. case Builtin::BI__builtin_powif:
  503. case Builtin::BI__builtin_powil: {
  504. Value *Base = EmitScalarExpr(E->getArg(0));
  505. Value *Exponent = EmitScalarExpr(E->getArg(1));
  506. llvm::Type *ArgType = Base->getType();
  507. Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
  508. return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
  509. }
  510. case Builtin::BI__builtin_isgreater:
  511. case Builtin::BI__builtin_isgreaterequal:
  512. case Builtin::BI__builtin_isless:
  513. case Builtin::BI__builtin_islessequal:
  514. case Builtin::BI__builtin_islessgreater:
  515. case Builtin::BI__builtin_isunordered: {
  516. // Ordered comparisons: we know the arguments to these are matching scalar
  517. // floating point values.
  518. Value *LHS = EmitScalarExpr(E->getArg(0));
  519. Value *RHS = EmitScalarExpr(E->getArg(1));
  520. switch (BuiltinID) {
  521. default: llvm_unreachable("Unknown ordered comparison");
  522. case Builtin::BI__builtin_isgreater:
  523. LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
  524. break;
  525. case Builtin::BI__builtin_isgreaterequal:
  526. LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
  527. break;
  528. case Builtin::BI__builtin_isless:
  529. LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
  530. break;
  531. case Builtin::BI__builtin_islessequal:
  532. LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
  533. break;
  534. case Builtin::BI__builtin_islessgreater:
  535. LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
  536. break;
  537. case Builtin::BI__builtin_isunordered:
  538. LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
  539. break;
  540. }
  541. // ZExt bool to int type.
  542. return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  543. }
  544. case Builtin::BI__builtin_isnan: {
  545. Value *V = EmitScalarExpr(E->getArg(0));
  546. V = Builder.CreateFCmpUNO(V, V, "cmp");
  547. return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  548. }
  549. case Builtin::BI__builtin_isinf: {
  550. // isinf(x) --> fabs(x) == infinity
  551. Value *V = EmitScalarExpr(E->getArg(0));
  552. V = EmitFAbs(*this, V);
  553. V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
  554. return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  555. }
  556. case Builtin::BI__builtin_isinf_sign: {
  557. // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
  558. Value *Arg = EmitScalarExpr(E->getArg(0));
  559. Value *AbsArg = EmitFAbs(*this, Arg);
  560. Value *IsInf = Builder.CreateFCmpOEQ(
  561. AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
  562. Value *IsNeg = EmitSignBit(*this, Arg);
  563. llvm::Type *IntTy = ConvertType(E->getType());
  564. Value *Zero = Constant::getNullValue(IntTy);
  565. Value *One = ConstantInt::get(IntTy, 1);
  566. Value *NegativeOne = ConstantInt::get(IntTy, -1);
  567. Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
  568. Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
  569. return RValue::get(Result);
  570. }
  571. case Builtin::BI__builtin_isnormal: {
  572. // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
  573. Value *V = EmitScalarExpr(E->getArg(0));
  574. Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
  575. Value *Abs = EmitFAbs(*this, V);
  576. Value *IsLessThanInf =
  577. Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
  578. APFloat Smallest = APFloat::getSmallestNormalized(
  579. getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
  580. Value *IsNormal =
  581. Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
  582. "isnormal");
  583. V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
  584. V = Builder.CreateAnd(V, IsNormal, "and");
  585. return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  586. }
  587. case Builtin::BI__builtin_isfinite: {
  588. // isfinite(x) --> x == x && fabs(x) != infinity;
  589. Value *V = EmitScalarExpr(E->getArg(0));
  590. Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
  591. Value *Abs = EmitFAbs(*this, V);
  592. Value *IsNotInf =
  593. Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
  594. V = Builder.CreateAnd(Eq, IsNotInf, "and");
  595. return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  596. }
  597. case Builtin::BI__builtin_fpclassify: {
  598. Value *V = EmitScalarExpr(E->getArg(5));
  599. llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
  600. // Create Result
  601. BasicBlock *Begin = Builder.GetInsertBlock();
  602. BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
  603. Builder.SetInsertPoint(End);
  604. PHINode *Result =
  605. Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
  606. "fpclassify_result");
  607. // if (V==0) return FP_ZERO
  608. Builder.SetInsertPoint(Begin);
  609. Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
  610. "iszero");
  611. Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
  612. BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
  613. Builder.CreateCondBr(IsZero, End, NotZero);
  614. Result->addIncoming(ZeroLiteral, Begin);
  615. // if (V != V) return FP_NAN
  616. Builder.SetInsertPoint(NotZero);
  617. Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
  618. Value *NanLiteral = EmitScalarExpr(E->getArg(0));
  619. BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
  620. Builder.CreateCondBr(IsNan, End, NotNan);
  621. Result->addIncoming(NanLiteral, NotZero);
  622. // if (fabs(V) == infinity) return FP_INFINITY
  623. Builder.SetInsertPoint(NotNan);
  624. Value *VAbs = EmitFAbs(*this, V);
  625. Value *IsInf =
  626. Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
  627. "isinf");
  628. Value *InfLiteral = EmitScalarExpr(E->getArg(1));
  629. BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
  630. Builder.CreateCondBr(IsInf, End, NotInf);
  631. Result->addIncoming(InfLiteral, NotNan);
  632. // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
  633. Builder.SetInsertPoint(NotInf);
  634. APFloat Smallest = APFloat::getSmallestNormalized(
  635. getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
  636. Value *IsNormal =
  637. Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
  638. "isnormal");
  639. Value *NormalResult =
  640. Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
  641. EmitScalarExpr(E->getArg(3)));
  642. Builder.CreateBr(End);
  643. Result->addIncoming(NormalResult, NotInf);
  644. // return Result
  645. Builder.SetInsertPoint(End);
  646. return RValue::get(Result);
  647. }
  648. case Builtin::BIalloca:
  649. case Builtin::BI_alloca:
  650. case Builtin::BI__builtin_alloca: {
  651. Value *Size = EmitScalarExpr(E->getArg(0));
  652. return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  653. }
  654. case Builtin::BIbzero:
  655. case Builtin::BI__builtin_bzero: {
  656. std::pair<llvm::Value*, unsigned> Dest =
  657. EmitPointerWithAlignment(E->getArg(0));
  658. Value *SizeVal = EmitScalarExpr(E->getArg(1));
  659. EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
  660. E->getArg(0)->getExprLoc(), FD, 0);
  661. Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
  662. Dest.second, false);
  663. return RValue::get(Dest.first);
  664. }
  665. case Builtin::BImemcpy:
  666. case Builtin::BI__builtin_memcpy: {
  667. std::pair<llvm::Value*, unsigned> Dest =
  668. EmitPointerWithAlignment(E->getArg(0));
  669. std::pair<llvm::Value*, unsigned> Src =
  670. EmitPointerWithAlignment(E->getArg(1));
  671. Value *SizeVal = EmitScalarExpr(E->getArg(2));
  672. unsigned Align = std::min(Dest.second, Src.second);
  673. EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
  674. E->getArg(0)->getExprLoc(), FD, 0);
  675. EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
  676. E->getArg(1)->getExprLoc(), FD, 1);
  677. Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
  678. return RValue::get(Dest.first);
  679. }
  680. case Builtin::BI__builtin___memcpy_chk: {
  681. // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
  682. llvm::APSInt Size, DstSize;
  683. if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
  684. !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
  685. break;
  686. if (Size.ugt(DstSize))
  687. break;
  688. std::pair<llvm::Value*, unsigned> Dest =
  689. EmitPointerWithAlignment(E->getArg(0));
  690. std::pair<llvm::Value*, unsigned> Src =
  691. EmitPointerWithAlignment(E->getArg(1));
  692. Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
  693. unsigned Align = std::min(Dest.second, Src.second);
  694. Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
  695. return RValue::get(Dest.first);
  696. }
  697. case Builtin::BI__builtin_objc_memmove_collectable: {
  698. Value *Address = EmitScalarExpr(E->getArg(0));
  699. Value *SrcAddr = EmitScalarExpr(E->getArg(1));
  700. Value *SizeVal = EmitScalarExpr(E->getArg(2));
  701. CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
  702. Address, SrcAddr, SizeVal);
  703. return RValue::get(Address);
  704. }
  705. case Builtin::BI__builtin___memmove_chk: {
  706. // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
  707. llvm::APSInt Size, DstSize;
  708. if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
  709. !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
  710. break;
  711. if (Size.ugt(DstSize))
  712. break;
  713. std::pair<llvm::Value*, unsigned> Dest =
  714. EmitPointerWithAlignment(E->getArg(0));
  715. std::pair<llvm::Value*, unsigned> Src =
  716. EmitPointerWithAlignment(E->getArg(1));
  717. Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
  718. unsigned Align = std::min(Dest.second, Src.second);
  719. Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
  720. return RValue::get(Dest.first);
  721. }
  722. case Builtin::BImemmove:
  723. case Builtin::BI__builtin_memmove: {
  724. std::pair<llvm::Value*, unsigned> Dest =
  725. EmitPointerWithAlignment(E->getArg(0));
  726. std::pair<llvm::Value*, unsigned> Src =
  727. EmitPointerWithAlignment(E->getArg(1));
  728. Value *SizeVal = EmitScalarExpr(E->getArg(2));
  729. unsigned Align = std::min(Dest.second, Src.second);
  730. EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
  731. E->getArg(0)->getExprLoc(), FD, 0);
  732. EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
  733. E->getArg(1)->getExprLoc(), FD, 1);
  734. Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
  735. return RValue::get(Dest.first);
  736. }
  737. case Builtin::BImemset:
  738. case Builtin::BI__builtin_memset: {
  739. std::pair<llvm::Value*, unsigned> Dest =
  740. EmitPointerWithAlignment(E->getArg(0));
  741. Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
  742. Builder.getInt8Ty());
  743. Value *SizeVal = EmitScalarExpr(E->getArg(2));
  744. EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
  745. E->getArg(0)->getExprLoc(), FD, 0);
  746. Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
  747. return RValue::get(Dest.first);
  748. }
  749. case Builtin::BI__builtin___memset_chk: {
  750. // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
  751. llvm::APSInt Size, DstSize;
  752. if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
  753. !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
  754. break;
  755. if (Size.ugt(DstSize))
  756. break;
  757. std::pair<llvm::Value*, unsigned> Dest =
  758. EmitPointerWithAlignment(E->getArg(0));
  759. Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
  760. Builder.getInt8Ty());
  761. Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
  762. Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
  763. return RValue::get(Dest.first);
  764. }
  765. case Builtin::BI__builtin_dwarf_cfa: {
  766. // The offset in bytes from the first argument to the CFA.
  767. //
  768. // Why on earth is this in the frontend? Is there any reason at
  769. // all that the backend can't reasonably determine this while
  770. // lowering llvm.eh.dwarf.cfa()?
  771. //
  772. // TODO: If there's a satisfactory reason, add a target hook for
  773. // this instead of hard-coding 0, which is correct for most targets.
  774. int32_t Offset = 0;
  775. Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
  776. return RValue::get(Builder.CreateCall(F,
  777. llvm::ConstantInt::get(Int32Ty, Offset)));
  778. }
  779. case Builtin::BI__builtin_return_address: {
  780. Value *Depth =
  781. CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
  782. Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
  783. return RValue::get(Builder.CreateCall(F, Depth));
  784. }
  785. case Builtin::BI__builtin_frame_address: {
  786. Value *Depth =
  787. CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
  788. Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
  789. return RValue::get(Builder.CreateCall(F, Depth));
  790. }
  791. case Builtin::BI__builtin_extract_return_addr: {
  792. Value *Address = EmitScalarExpr(E->getArg(0));
  793. Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
  794. return RValue::get(Result);
  795. }
  796. case Builtin::BI__builtin_frob_return_addr: {
  797. Value *Address = EmitScalarExpr(E->getArg(0));
  798. Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
  799. return RValue::get(Result);
  800. }
case Builtin::BI__builtin_dwarf_sp_column: {
  // Result type is the integer type the builtin is declared to return.
  llvm::IntegerType *Ty
    = cast<llvm::IntegerType>(ConvertType(E->getType()));
  // Ask the target for the DWARF register number of the stack pointer;
  // -1 signals that the target doesn't provide one.
  int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
  if (Column == -1) {
    CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
    return RValue::get(llvm::UndefValue::get(Ty));
  }
  return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
}
case Builtin::BI__builtin_init_dwarf_reg_size_table: {
  // Delegate filling the register-size table to the target hook; a true
  // return means the target doesn't support it.
  Value *Address = EmitScalarExpr(E->getArg(0));
  if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
    CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
  // The builtin produces no meaningful value; return undef of its type.
  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}
case Builtin::BI__builtin_eh_return: {
  Value *Int = EmitScalarExpr(E->getArg(0));
  Value *Ptr = EmitScalarExpr(E->getArg(1));

  // Pick the eh.return intrinsic matching the width of the offset argument.
  llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
  assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
         "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
  Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
  Builder.CreateCall(F, {Int, Ptr});
  // eh_return does not return to the caller.
  Builder.CreateUnreachable();

  // We do need to preserve an insertion point.
  EmitBlock(createBasicBlock("builtin_eh_return.cont"));

  return RValue::get(nullptr);
}
case Builtin::BI__builtin_unwind_init: {
  // Lower to @llvm.eh.unwind.init (no arguments, no useful result).
  Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
  return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_extend_pointer: {
  // Extends a pointer to the size of an _Unwind_Word, which is
  // uint64_t on all platforms. Generally this gets poked into a
  // register and eventually used as an address, so if the
  // addressing registers are wider than pointers and the platform
  // doesn't implicitly ignore high-order bits when doing
  // addressing, we need to make sure we zext / sext based on
  // the platform's expectations.
  //
  // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

  // Cast the pointer to intptr_t.
  Value *Ptr = EmitScalarExpr(E->getArg(0));
  Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

  // If that's 64 bits, we're done.
  if (IntPtrTy->getBitWidth() == 64)
    return RValue::get(Result);

  // Otherwise, ask the codegen data what to do.
  if (getTargetHooks().extendPointerWithSExt())
    return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
  else
    return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
}
case Builtin::BI__builtin_setjmp: {
  // Buffer is a void**.
  Value *Buf = EmitScalarExpr(E->getArg(0));

  // Store the frame pointer to the setjmp buffer (slot 0).
  Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
  Builder.CreateStore(FrameAddr, Buf);

  // Store the stack pointer to the setjmp buffer (slot 2; slot 1 is not
  // written here — presumably reserved by the SjLj scheme, confirm against
  // the eh.sjlj.setjmp documentation).
  Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
  Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
  Builder.CreateStore(StackAddr, StackSaveSlot);

  // Call LLVM's EH setjmp, which is lightweight.
  Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
  Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
  return RValue::get(Builder.CreateCall(F, Buf));
}
case Builtin::BI__builtin_longjmp: {
  Value *Buf = EmitScalarExpr(E->getArg(0));
  Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

  // Call LLVM's EH longjmp, which is lightweight.
  Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

  // longjmp doesn't return; mark this as unreachable.
  Builder.CreateUnreachable();

  // We do need to preserve an insertion point.
  EmitBlock(createBasicBlock("longjmp.cont"));

  return RValue::get(nullptr);
}
// The unsuffixed __sync_* builtins are rewritten by Sema into the
// size-suffixed forms handled below, so they should never reach IRGen.
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_or:
case Builtin::BI__sync_fetch_and_and:
case Builtin::BI__sync_fetch_and_xor:
case Builtin::BI__sync_fetch_and_nand:
case Builtin::BI__sync_add_and_fetch:
case Builtin::BI__sync_sub_and_fetch:
case Builtin::BI__sync_and_and_fetch:
case Builtin::BI__sync_or_and_fetch:
case Builtin::BI__sync_xor_and_fetch:
case Builtin::BI__sync_nand_and_fetch:
case Builtin::BI__sync_val_compare_and_swap:
case Builtin::BI__sync_bool_compare_and_swap:
case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_release:
case Builtin::BI__sync_swap:
  llvm_unreachable("Shouldn't make it through sema");
// Each suffixed fetch_and_* group lowers to a single atomicrmw with the
// matching operation (GCC __sync semantics: the result is the value the
// memory held before the operation).
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
case Builtin::BI__sync_fetch_and_add_4:
case Builtin::BI__sync_fetch_and_add_8:
case Builtin::BI__sync_fetch_and_add_16:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
case Builtin::BI__sync_fetch_and_sub_1:
case Builtin::BI__sync_fetch_and_sub_2:
case Builtin::BI__sync_fetch_and_sub_4:
case Builtin::BI__sync_fetch_and_sub_8:
case Builtin::BI__sync_fetch_and_sub_16:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
case Builtin::BI__sync_fetch_and_or_1:
case Builtin::BI__sync_fetch_and_or_2:
case Builtin::BI__sync_fetch_and_or_4:
case Builtin::BI__sync_fetch_and_or_8:
case Builtin::BI__sync_fetch_and_or_16:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
case Builtin::BI__sync_fetch_and_and_1:
case Builtin::BI__sync_fetch_and_and_2:
case Builtin::BI__sync_fetch_and_and_4:
case Builtin::BI__sync_fetch_and_and_8:
case Builtin::BI__sync_fetch_and_and_16:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
case Builtin::BI__sync_fetch_and_xor_1:
case Builtin::BI__sync_fetch_and_xor_2:
case Builtin::BI__sync_fetch_and_xor_4:
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
case Builtin::BI__sync_fetch_and_nand_1:
case Builtin::BI__sync_fetch_and_nand_2:
case Builtin::BI__sync_fetch_and_nand_4:
case Builtin::BI__sync_fetch_and_nand_8:
case Builtin::BI__sync_fetch_and_nand_16:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);

// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
case Builtin::BI__sync_fetch_and_max:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
case Builtin::BI__sync_fetch_and_umin:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
case Builtin::BI__sync_fetch_and_umax:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
// The *_and_fetch forms return the value *after* the operation:
// EmitBinaryAtomicPost performs the atomicrmw and then re-applies the
// given (non-atomic) instruction to recompute the post value.
case Builtin::BI__sync_add_and_fetch_1:
case Builtin::BI__sync_add_and_fetch_2:
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
  return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                              llvm::Instruction::Add);
case Builtin::BI__sync_sub_and_fetch_1:
case Builtin::BI__sync_sub_and_fetch_2:
case Builtin::BI__sync_sub_and_fetch_4:
case Builtin::BI__sync_sub_and_fetch_8:
case Builtin::BI__sync_sub_and_fetch_16:
  return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                              llvm::Instruction::Sub);
case Builtin::BI__sync_and_and_fetch_1:
case Builtin::BI__sync_and_and_fetch_2:
case Builtin::BI__sync_and_and_fetch_4:
case Builtin::BI__sync_and_and_fetch_8:
case Builtin::BI__sync_and_and_fetch_16:
  return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                              llvm::Instruction::And);
case Builtin::BI__sync_or_and_fetch_1:
case Builtin::BI__sync_or_and_fetch_2:
case Builtin::BI__sync_or_and_fetch_4:
case Builtin::BI__sync_or_and_fetch_8:
case Builtin::BI__sync_or_and_fetch_16:
  return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                              llvm::Instruction::Or);
case Builtin::BI__sync_xor_and_fetch_1:
case Builtin::BI__sync_xor_and_fetch_2:
case Builtin::BI__sync_xor_and_fetch_4:
case Builtin::BI__sync_xor_and_fetch_8:
case Builtin::BI__sync_xor_and_fetch_16:
  return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                              llvm::Instruction::Xor);
// nand recomputes via And plus an extra flag — presumably an "invert the
// result" flag to model nand; confirm against EmitBinaryAtomicPost.
case Builtin::BI__sync_nand_and_fetch_1:
case Builtin::BI__sync_nand_and_fetch_2:
case Builtin::BI__sync_nand_and_fetch_4:
case Builtin::BI__sync_nand_and_fetch_8:
case Builtin::BI__sync_nand_and_fetch_16:
  return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
                              llvm::Instruction::And, true);

// Compare-and-swap: the trailing bool selects which result is returned
// (false for val_ variants, true for bool_ variants — see
// MakeAtomicCmpXchgValue).
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
case Builtin::BI__sync_val_compare_and_swap_4:
case Builtin::BI__sync_val_compare_and_swap_8:
case Builtin::BI__sync_val_compare_and_swap_16:
  return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));

case Builtin::BI__sync_bool_compare_and_swap_1:
case Builtin::BI__sync_bool_compare_and_swap_2:
case Builtin::BI__sync_bool_compare_and_swap_4:
case Builtin::BI__sync_bool_compare_and_swap_8:
case Builtin::BI__sync_bool_compare_and_swap_16:
  return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));

// Both __sync_swap and __sync_lock_test_and_set lower to an atomicrmw xchg.
case Builtin::BI__sync_swap_1:
case Builtin::BI__sync_swap_2:
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

case Builtin::BI__sync_lock_test_and_set_1:
case Builtin::BI__sync_lock_test_and_set_2:
case Builtin::BI__sync_lock_test_and_set_4:
case Builtin::BI__sync_lock_test_and_set_8:
case Builtin::BI__sync_lock_test_and_set_16:
  return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_release_1:
case Builtin::BI__sync_lock_release_2:
case Builtin::BI__sync_lock_release_4:
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
  // __sync_lock_release writes 0 to the object with release ordering.
  Value *Ptr = EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
  // Store through an integer type of the object's exact size so the store
  // can legally be made atomic.
  llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                           StoreSize.getQuantity() * 8);
  Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::StoreInst *Store =
    Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
  Store->setAlignment(StoreSize.getQuantity());
  Store->setAtomic(llvm::Release);
  return RValue::get(nullptr);
}

case Builtin::BI__sync_synchronize: {
  // We assume this is supposed to correspond to a C++0x-style
  // sequentially-consistent fence (i.e. this is only usable for
  // synchonization, not device I/O or anything like that). This intrinsic
  // is really badly designed in the sense that in theory, there isn't
  // any way to safely use it... but in practice, it mostly works
  // to use it with non-atomic loads and stores to get acquire/release
  // semantics.
  Builder.CreateFence(llvm::SequentiallyConsistent);
  return RValue::get(nullptr);
}
case Builtin::BI__c11_atomic_is_lock_free:
case Builtin::BI__atomic_is_lock_free: {
  // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
  // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
  // _Atomic(T) is always properly-aligned.
  const char *LibCallName = "__atomic_is_lock_free";
  CallArgList Args;
  Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
           getContext().getSizeType());
  if (BuiltinID == Builtin::BI__atomic_is_lock_free)
    Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
             getContext().VoidPtrTy);
  else
    Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
             getContext().VoidPtrTy);
  // Arrange the libcall's signature and emit it as a runtime function.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
                                             FunctionType::ExtInfo(),
                                             RequiredArgs::All);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
  llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
  return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
}
case Builtin::BI__atomic_test_and_set: {
  // Look at the argument type to determine whether this is a volatile
  // operation. The parameter type is always volatile.
  QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
  bool Volatile =
      PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

  // The operation is an xchg of the i8 value 1 into the target byte.
  Value *Ptr = EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
  Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
  Value *NewVal = Builder.getInt8(1);
  Value *Order = EmitScalarExpr(E->getArg(1));
  if (isa<llvm::ConstantInt>(Order)) {
    // Constant memory order: emit a single atomicrmw with the mapped
    // LLVM ordering; consume is strengthened to acquire.
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    AtomicRMWInst *Result = nullptr;
    switch (ord) {
    case 0:  // memory_order_relaxed
    default: // invalid order
      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                       Ptr, NewVal,
                                       llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                       Ptr, NewVal,
                                       llvm::Acquire);
      break;
    case 3:  // memory_order_release
      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                       Ptr, NewVal,
                                       llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                       Ptr, NewVal,
                                       llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                       Ptr, NewVal,
                                       llvm::SequentiallyConsistent);
      break;
    }
    Result->setVolatile(Volatile);
    // The builtin's result is whether the previous value was non-zero.
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }

  // Runtime memory order: switch on the order value, perform the xchg in a
  // per-ordering block, and merge the old values with a phi.
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  llvm::BasicBlock *BBs[5] = {
    createBasicBlock("monotonic", CurFn),
    createBasicBlock("acquire", CurFn),
    createBasicBlock("release", CurFn),
    createBasicBlock("acqrel", CurFn),
    createBasicBlock("seqcst", CurFn)
  };
  llvm::AtomicOrdering Orders[5] = {
    llvm::Monotonic, llvm::Acquire, llvm::Release,
    llvm::AcquireRelease, llvm::SequentiallyConsistent
  };

  // Invalid order values fall through to the monotonic block (the default).
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

  Builder.SetInsertPoint(ContBB);
  PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

  for (unsigned i = 0; i < 5; ++i) {
    Builder.SetInsertPoint(BBs[i]);
    AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                 Ptr, NewVal, Orders[i]);
    RMW->setVolatile(Volatile);
    Result->addIncoming(RMW, BBs[i]);
    Builder.CreateBr(ContBB);
  }

  // Map the C memory_order values onto the blocks; consume (1) shares the
  // acquire block.
  SI->addCase(Builder.getInt32(0), BBs[0]);
  SI->addCase(Builder.getInt32(1), BBs[1]);
  SI->addCase(Builder.getInt32(2), BBs[1]);
  SI->addCase(Builder.getInt32(3), BBs[2]);
  SI->addCase(Builder.getInt32(4), BBs[3]);
  SI->addCase(Builder.getInt32(5), BBs[4]);

  Builder.SetInsertPoint(ContBB);
  return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
}
case Builtin::BI__atomic_clear: {
  // As with test_and_set, the parameter type determines volatility.
  QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
  bool Volatile =
      PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

  // The operation is a (possibly atomic) store of the i8 value 0.
  Value *Ptr = EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
  Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
  Value *NewVal = Builder.getInt8(0);
  Value *Order = EmitScalarExpr(E->getArg(1));
  if (isa<llvm::ConstantInt>(Order)) {
    // Constant memory order: one store with the mapped ordering. Only
    // relaxed, release and seq_cst are valid for a clear.
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
    Store->setAlignment(1);
    switch (ord) {
    case 0:  // memory_order_relaxed
    default: // invalid order
      Store->setOrdering(llvm::Monotonic);
      break;
    case 3:  // memory_order_release
      Store->setOrdering(llvm::Release);
      break;
    case 5:  // memory_order_seq_cst
      Store->setOrdering(llvm::SequentiallyConsistent);
      break;
    }
    return RValue::get(nullptr);
  }

  // Runtime memory order: dispatch to one store per valid ordering.
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  llvm::BasicBlock *BBs[3] = {
    createBasicBlock("monotonic", CurFn),
    createBasicBlock("release", CurFn),
    createBasicBlock("seqcst", CurFn)
  };
  llvm::AtomicOrdering Orders[3] = {
    llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
  };

  // Invalid order values fall through to the monotonic block (the default).
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

  for (unsigned i = 0; i < 3; ++i) {
    Builder.SetInsertPoint(BBs[i]);
    StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
    Store->setAlignment(1);
    Store->setOrdering(Orders[i]);
    Builder.CreateBr(ContBB);
  }

  SI->addCase(Builder.getInt32(0), BBs[0]);
  SI->addCase(Builder.getInt32(3), BBs[1]);
  SI->addCase(Builder.getInt32(5), BBs[2]);

  Builder.SetInsertPoint(ContBB);
  return RValue::get(nullptr);
}
case Builtin::BI__atomic_thread_fence:
case Builtin::BI__atomic_signal_fence:
case Builtin::BI__c11_atomic_thread_fence:
case Builtin::BI__c11_atomic_signal_fence: {
  // Signal fences only constrain the current thread (SingleThread scope);
  // thread fences synchronize across threads (CrossThread scope).
  llvm::SynchronizationScope Scope;
  if (BuiltinID == Builtin::BI__atomic_signal_fence ||
      BuiltinID == Builtin::BI__c11_atomic_signal_fence)
    Scope = llvm::SingleThread;
  else
    Scope = llvm::CrossThread;
  Value *Order = EmitScalarExpr(E->getArg(0));
  if (isa<llvm::ConstantInt>(Order)) {
    // Constant memory order: emit at most one fence (relaxed needs none).
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
    default: // invalid order
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      Builder.CreateFence(llvm::Acquire, Scope);
      break;
    case 3:  // memory_order_release
      Builder.CreateFence(llvm::Release, Scope);
      break;
    case 4:  // memory_order_acq_rel
      Builder.CreateFence(llvm::AcquireRelease, Scope);
      break;
    case 5:  // memory_order_seq_cst
      Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
      break;
    }
    return RValue::get(nullptr);
  }

  // Runtime memory order: one fence per ordering; relaxed / invalid orders
  // branch straight to the continuation block (no fence emitted).
  llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
  AcquireBB = createBasicBlock("acquire", CurFn);
  ReleaseBB = createBasicBlock("release", CurFn);
  AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

  Builder.SetInsertPoint(AcquireBB);
  Builder.CreateFence(llvm::Acquire, Scope);
  Builder.CreateBr(ContBB);
  // consume (1) is strengthened to acquire.
  SI->addCase(Builder.getInt32(1), AcquireBB);
  SI->addCase(Builder.getInt32(2), AcquireBB);

  Builder.SetInsertPoint(ReleaseBB);
  Builder.CreateFence(llvm::Release, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(3), ReleaseBB);

  Builder.SetInsertPoint(AcqRelBB);
  Builder.CreateFence(llvm::AcquireRelease, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(4), AcqRelBB);

  Builder.SetInsertPoint(SeqCstBB);
  Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  Builder.SetInsertPoint(ContBB);
  return RValue::get(nullptr);
}
// Library functions with special handling.
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
case Builtin::BIsqrtl: {
  // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
  // in finite- or unsafe-math mode (the intrinsic has different semantics
  // for handling negative numbers compared to the library function, so
  // -fmath-errno=0 is not enough).
  // The declaration must also be const-qualified (no errno side effect).
  if (!FD->hasAttr<ConstAttr>())
    break;
  if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
        CGM.getCodeGenOpts().NoNaNsFPMath))
    break;
  Value *Arg0 = EmitScalarExpr(E->getArg(0));
  // The intrinsic is overloaded on the operand's FP type.
  llvm::Type *ArgType = Arg0->getType();
  Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
  return RValue::get(Builder.CreateCall(F, Arg0));
}
  1277. case Builtin::BI__builtin_pow:
  1278. case Builtin::BI__builtin_powf:
  1279. case Builtin::BI__builtin_powl:
  1280. case Builtin::BIpow:
  1281. case Builtin::BIpowf:
  1282. case Builtin::BIpowl: {
  1283. // Transform a call to pow* into a @llvm.pow.* intrinsic call.
  1284. if (!FD->hasAttr<ConstAttr>())
  1285. break;
  1286. Value *Base = EmitScalarExpr(E->getArg(0));
  1287. Value *Exponent = EmitScalarExpr(E->getArg(1));
  1288. llvm::Type *ArgType = Base->getType();
  1289. Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
  1290. return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
  1291. }
  1292. case Builtin::BIfma:
  1293. case Builtin::BIfmaf:
  1294. case Builtin::BIfmal:
  1295. case Builtin::BI__builtin_fma:
  1296. case Builtin::BI__builtin_fmaf:
  1297. case Builtin::BI__builtin_fmal: {
  1298. // Rewrite fma to intrinsic.
  1299. Value *FirstArg = EmitScalarExpr(E->getArg(0));
  1300. llvm::Type *ArgType = FirstArg->getType();
  1301. Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
  1302. return RValue::get(
  1303. Builder.CreateCall(F, {FirstArg, EmitScalarExpr(E->getArg(1)),
  1304. EmitScalarExpr(E->getArg(2))}));
  1305. }
  1306. case Builtin::BI__builtin_signbit:
  1307. case Builtin::BI__builtin_signbitf:
  1308. case Builtin::BI__builtin_signbitl: {
  1309. return RValue::get(
  1310. Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
  1311. ConvertType(E->getType())));
  1312. }
case Builtin::BI__builtin_annotation: {
  // Lower to @llvm.annotation on the first argument's value.
  llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                    AnnVal->getType());

  // Get the annotation string, go through casts. Sema requires this to be a
  // non-wide string literal, potentially casted, so the cast<> is safe.
  const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
  StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
  return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
}
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
case Builtin::BI__builtin_subcll: {
  // We translate all of these builtins from expressions of the form:
  //   int x = ..., y = ..., carryin = ..., carryout, result;
  //   result = __builtin_addc(x, y, carryin, &carryout);
  //
  // to LLVM IR of the form:
  //
  //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
  //   %carry1 = extractvalue {i32, i1} %tmp1, 1
  //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
  //                                                       i32 %carryin)
  //   %result = extractvalue {i32, i1} %tmp2, 0
  //   %carry2 = extractvalue {i32, i1} %tmp2, 1
  //   %tmp3 = or i1 %carry1, %carry2
  //   %tmp4 = zext i1 %tmp3 to i32
  //   store i32 %tmp4, i32* %carryout

  // Scalarize our inputs.
  llvm::Value *X = EmitScalarExpr(E->getArg(0));
  llvm::Value *Y = EmitScalarExpr(E->getArg(1));
  llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
  std::pair<llvm::Value*, unsigned> CarryOutPtr =
    EmitPointerWithAlignment(E->getArg(3));

  // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
  llvm::Intrinsic::ID IntrinsicId;
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown multiprecision builtin id.");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    IntrinsicId = llvm::Intrinsic::usub_with_overflow;
    break;
  }

  // Construct our resulting LLVM IR expression: two chained overflow ops
  // (x op y, then op carryin); either step may produce the carry.
  llvm::Value *Carry1;
  llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                            X, Y, Carry1);
  llvm::Value *Carry2;
  llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                            Sum1, Carryin, Carry2);
  llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                             X->getType());
  // Write the combined carry through the fourth (pointer) argument.
  llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
                                                       CarryOutPtr.first);
  CarryOutStore->setAlignment(CarryOutPtr.second);
  return RValue::get(Sum2);
}
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow: {
  // We translate all of these builtins directly to the relevant llvm IR node.

  // Scalarize our inputs.
  llvm::Value *X = EmitScalarExpr(E->getArg(0));
  llvm::Value *Y = EmitScalarExpr(E->getArg(1));
  std::pair<llvm::Value *, unsigned> SumOutPtr =
    EmitPointerWithAlignment(E->getArg(2));

  // Decide which of the overflow intrinsics we are lowering to:
  llvm::Intrinsic::ID IntrinsicId;
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown security overflow builtin id.");
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
    break;
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    IntrinsicId = llvm::Intrinsic::usub_with_overflow;
    break;
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    IntrinsicId = llvm::Intrinsic::umul_with_overflow;
    break;
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
    IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
    break;
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
    IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
    break;
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
    IntrinsicId = llvm::Intrinsic::smul_with_overflow;
    break;
  }

  // Store the operation's result through the third argument and return the
  // overflow bit as the builtin's value.
  llvm::Value *Carry;
  llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
  llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
  SumOutStore->setAlignment(SumOutPtr.second);

  return RValue::get(Carry);
}
case Builtin::BI__builtin_addressof:
  // Take the address of the lvalue directly (bypassing any overloaded
  // operator& in the source type).
  return RValue::get(EmitLValue(E->getArg(0)).getAddress());
case Builtin::BI__builtin_operator_new:
  // Trailing flag distinguishes new (false) from delete (true) —
  // presumably an IsDelete parameter; confirm against
  // EmitBuiltinNewDeleteCall.
  return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
                                  E->getArg(0), false);
case Builtin::BI__builtin_operator_delete:
  return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
                                  E->getArg(0), true);
case Builtin::BI__noop:
  // __noop always evaluates to an integer literal zero.
  return RValue::get(ConstantInt::get(IntTy, 0));
case Builtin::BI__builtin_call_with_static_chain: {
  // Emit the wrapped call expression, threading the second argument
  // through as the call's static chain.
  const CallExpr *Call = cast<CallExpr>(E->getArg(0));
  const Expr *Chain = E->getArg(1);
  return EmitCall(Call->getCallee()->getType(),
                  EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
                  Call->getCalleeDecl(), EmitScalarExpr(Chain));
}
  1471. case Builtin::BI_InterlockedExchange:
  1472. case Builtin::BI_InterlockedExchangePointer:
  1473. return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
  1474. case Builtin::BI_InterlockedCompareExchangePointer: {
  1475. llvm::Type *RTy;
  1476. llvm::IntegerType *IntType =
  1477. IntegerType::get(getLLVMContext(),
  1478. getContext().getTypeSize(E->getType()));
  1479. llvm::Type *IntPtrType = IntType->getPointerTo();
  1480. llvm::Value *Destination =
  1481. Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
  1482. llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
  1483. RTy = Exchange->getType();
  1484. Exchange = Builder.CreatePtrToInt(Exchange, IntType);
  1485. llvm::Value *Comparand =
  1486. Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
  1487. auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
  1488. SequentiallyConsistent,
  1489. SequentiallyConsistent);
  1490. Result->setVolatile(true);
  1491. return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
  1492. 0),
  1493. RTy));
  1494. }
  1495. case Builtin::BI_InterlockedCompareExchange: {
  1496. AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
  1497. EmitScalarExpr(E->getArg(0)),
  1498. EmitScalarExpr(E->getArg(2)),
  1499. EmitScalarExpr(E->getArg(1)),
  1500. SequentiallyConsistent,
  1501. SequentiallyConsistent);
  1502. CXI->setVolatile(true);
  1503. return RValue::get(Builder.CreateExtractValue(CXI, 0));
  1504. }
  1505. case Builtin::BI_InterlockedIncrement: {
  1506. AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
  1507. AtomicRMWInst::Add,
  1508. EmitScalarExpr(E->getArg(0)),
  1509. ConstantInt::get(Int32Ty, 1),
  1510. llvm::SequentiallyConsistent);
  1511. RMWI->setVolatile(true);
  1512. return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
  1513. }
  1514. case Builtin::BI_InterlockedDecrement: {
  1515. AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
  1516. AtomicRMWInst::Sub,
  1517. EmitScalarExpr(E->getArg(0)),
  1518. ConstantInt::get(Int32Ty, 1),
  1519. llvm::SequentiallyConsistent);
  1520. RMWI->setVolatile(true);
  1521. return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
  1522. }
  1523. case Builtin::BI_InterlockedExchangeAdd: {
  1524. AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
  1525. AtomicRMWInst::Add,
  1526. EmitScalarExpr(E->getArg(0)),
  1527. EmitScalarExpr(E->getArg(1)),
  1528. llvm::SequentiallyConsistent);
  1529. RMWI->setVolatile(true);
  1530. return RValue::get(RMWI);
  1531. }
  1532. case Builtin::BI__readfsdword: {
  1533. Value *IntToPtr =
  1534. Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
  1535. llvm::PointerType::get(CGM.Int32Ty, 257));
  1536. LoadInst *Load =
  1537. Builder.CreateAlignedLoad(IntToPtr, /*Align=*/4, /*isVolatile=*/true);
  1538. return RValue::get(Load);
  1539. }
  1540. #if 0 // HLSL Change Start - no support for exception handling
  1541. case Builtin::BI__exception_code:
  1542. case Builtin::BI_exception_code:
  1543. return RValue::get(EmitSEHExceptionCode());
  1544. case Builtin::BI__exception_info:
  1545. case Builtin::BI_exception_info:
  1546. return RValue::get(EmitSEHExceptionInfo());
  1547. case Builtin::BI__abnormal_termination:
  1548. case Builtin::BI_abnormal_termination:
  1549. return RValue::get(EmitSEHAbnormalTermination());
  1550. case Builtin::BI_setjmpex: {
  1551. if (getTarget().getTriple().isOSMSVCRT()) {
  1552. llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
  1553. llvm::AttributeSet ReturnsTwiceAttr =
  1554. AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
  1555. llvm::Attribute::ReturnsTwice);
  1556. llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction(
  1557. llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
  1558. "_setjmpex", ReturnsTwiceAttr);
  1559. llvm::Value *Buf = Builder.CreateBitOrPointerCast(
  1560. EmitScalarExpr(E->getArg(0)), Int8PtrTy);
  1561. llvm::Value *FrameAddr =
  1562. Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
  1563. ConstantInt::get(Int32Ty, 0));
  1564. llvm::Value *Args[] = {Buf, FrameAddr};
  1565. llvm::CallSite CS = EmitRuntimeCallOrInvoke(SetJmpEx, Args);
  1566. CS.setAttributes(ReturnsTwiceAttr);
  1567. return RValue::get(CS.getInstruction());
  1568. }
  1569. break;
  1570. }
  1571. case Builtin::BI_setjmp: {
  1572. if (getTarget().getTriple().isOSMSVCRT()) {
  1573. llvm::AttributeSet ReturnsTwiceAttr =
  1574. AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
  1575. llvm::Attribute::ReturnsTwice);
  1576. llvm::Value *Buf = Builder.CreateBitOrPointerCast(
  1577. EmitScalarExpr(E->getArg(0)), Int8PtrTy);
  1578. llvm::CallSite CS;
  1579. if (getTarget().getTriple().getArch() == llvm::Triple::x86) {
  1580. llvm::Type *ArgTypes[] = {Int8PtrTy, IntTy};
  1581. llvm::Constant *SetJmp3 = CGM.CreateRuntimeFunction(
  1582. llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true),
  1583. "_setjmp3", ReturnsTwiceAttr);
  1584. llvm::Value *Count = ConstantInt::get(IntTy, 0);
  1585. llvm::Value *Args[] = {Buf, Count};
  1586. CS = EmitRuntimeCallOrInvoke(SetJmp3, Args);
  1587. } else {
  1588. llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
  1589. llvm::Constant *SetJmp = CGM.CreateRuntimeFunction(
  1590. llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
  1591. "_setjmp", ReturnsTwiceAttr);
  1592. llvm::Value *FrameAddr =
  1593. Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
  1594. ConstantInt::get(Int32Ty, 0));
  1595. llvm::Value *Args[] = {Buf, FrameAddr};
  1596. CS = EmitRuntimeCallOrInvoke(SetJmp, Args);
  1597. }
  1598. CS.setAttributes(ReturnsTwiceAttr);
  1599. return RValue::get(CS.getInstruction());
  1600. }
  1601. break;
  1602. }
  1603. case Builtin::BI__GetExceptionInfo: {
  1604. if (llvm::GlobalVariable *GV =
  1605. CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
  1606. return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
  1607. break;
  1608. }
  1609. #endif // HLSL Change Ends - no support for exception handling
  1610. }
  1611. // If this is an alias for a lib function (e.g. __builtin_sin), emit
  1612. // the call using the normal call path, but using the unmangled
  1613. // version of the function name.
  1614. if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
  1615. return emitLibraryCall(*this, FD, E,
  1616. CGM.getBuiltinLibFunction(FD, BuiltinID));
  1617. // If this is a predefined lib function (e.g. malloc), emit the call
  1618. // using exactly the normal call path.
  1619. if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
  1620. return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
  1621. // See if we have a target specific intrinsic.
  1622. const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  1623. Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  1624. if (const char *Prefix =
  1625. llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) {
  1626. IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
  1627. // NOTE we dont need to perform a compatibility flag check here since the
  1628. // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
  1629. // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
  1630. if (IntrinsicID == Intrinsic::not_intrinsic)
  1631. IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix, Name);
  1632. }
  1633. if (IntrinsicID != Intrinsic::not_intrinsic) {
  1634. SmallVector<Value*, 16> Args;
  1635. // Find out if any arguments are required to be integer constant
  1636. // expressions.
  1637. unsigned ICEArguments = 0;
  1638. ASTContext::GetBuiltinTypeError Error;
  1639. getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  1640. assert(Error == ASTContext::GE_None && "Should not codegen an error");
  1641. Function *F = CGM.getIntrinsic(IntrinsicID);
  1642. llvm::FunctionType *FTy = F->getFunctionType();
  1643. for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
  1644. Value *ArgValue;
  1645. // If this is a normal argument, just emit it as a scalar.
  1646. if ((ICEArguments & (1 << i)) == 0) {
  1647. ArgValue = EmitScalarExpr(E->getArg(i));
  1648. } else {
  1649. // If this is required to be a constant, constant fold it so that we
  1650. // know that the generated intrinsic gets a ConstantInt.
  1651. llvm::APSInt Result;
  1652. bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
  1653. assert(IsConst && "Constant arg isn't actually constant?");
  1654. (void)IsConst;
  1655. ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
  1656. }
  1657. // If the intrinsic arg type is different from the builtin arg type
  1658. // we need to do a bit cast.
  1659. llvm::Type *PTy = FTy->getParamType(i);
  1660. if (PTy != ArgValue->getType()) {
  1661. assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
  1662. "Must be able to losslessly bit cast to param");
  1663. ArgValue = Builder.CreateBitCast(ArgValue, PTy);
  1664. }
  1665. Args.push_back(ArgValue);
  1666. }
  1667. Value *V = Builder.CreateCall(F, Args);
  1668. QualType BuiltinRetType = E->getType();
  1669. llvm::Type *RetTy = VoidTy;
  1670. if (!BuiltinRetType->isVoidType())
  1671. RetTy = ConvertType(BuiltinRetType);
  1672. if (RetTy != V->getType()) {
  1673. assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
  1674. "Must be able to losslessly bit cast result type");
  1675. V = Builder.CreateBitCast(V, RetTy);
  1676. }
  1677. return RValue::get(V);
  1678. }
  1679. // See if we have a target specific builtin that needs to be lowered.
  1680. if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
  1681. return RValue::get(V);
  1682. ErrorUnsupported(E, "builtin function");
  1683. // Unknown builtin, for now just dump it out and return undef.
  1684. return GetUndefRValue(E->getType());
  1685. }
/// EmitTargetBuiltinExpr - Lower a target-specific builtin call to LLVM IR.
/// Returns null when the builtin is not handled, in which case the caller
/// (EmitBuiltinExpr) reports "builtin function" as unsupported and returns
/// undef.
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
#if 1 // HLSL Change Starts
  // HLSL has no target-specific builtins: always report "not handled".
  return nullptr;
#else
  // Dispatch on the target architecture; each per-target emitter returns
  // null for builtins it does not recognize.
  switch (getTarget().getTriple().getArch()) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be:
    return EmitAArch64BuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return EmitAMDGPUBuiltinExpr(BuiltinID, E);
  case llvm::Triple::systemz:
    return EmitSystemZBuiltinExpr(BuiltinID, E);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return EmitNVPTXBuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
#endif // HLSL Change Ends
}
  1720. #if 0 // HLSL Change Starts
/// GetNeonType - Map a NeonTypeFlags (element type + quad bit) onto the LLVM
/// vector type used to represent it.  Quad types double the lane count.
/// When \p V1Ty is set, a single-element vector of the element type is
/// returned instead of the full-width vector.
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
                                     NeonTypeFlags TypeFlags,
                                     bool V1Ty=false) {
  int IsQuad = TypeFlags.isQuad();
  switch (TypeFlags.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
  case NeonTypeFlags::Float16:
    // Half floats are represented as i16 here; no f16 lanes are produced.
    return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
  case NeonTypeFlags::Poly128:
    // FIXME: i128 and f128 doesn't get fully support in Clang and llvm.
    // There is a lot of i128 and f128 API missing.
    // so we use v16i8 to represent poly128 and get pattern matched.
    return llvm::VectorType::get(CGF->Int8Ty, 16);
  case NeonTypeFlags::Float32:
    return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Float64:
    return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
  }
  llvm_unreachable("Unknown vector element type!");
}
  1750. Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  1751. unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  1752. Value* SV = llvm::ConstantVector::getSplat(nElts, C);
  1753. return Builder.CreateShuffleVector(V, V, SV, "lane");
  1754. }
/// EmitNeonCall - Emit a call to the intrinsic \p F, first coercing each
/// operand in \p Ops to the parameter type the intrinsic declares.
/// \param shift when non-zero, the index of the operand that is a constant
///        shift amount; that operand is expanded into a splat vector of
///        shift counts (negated when \p rightshift is set) instead of being
///        bitcast.  Index 0 can therefore never be the shift operand.
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      // Expand the scalar shift amount into a vector of per-lane counts.
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      // Coerce the operand to the intrinsic's declared parameter type.
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops, name);
}
  1767. Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
  1768. bool neg) {
  1769. int SV = cast<ConstantInt>(V)->getSExtValue();
  1770. llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  1771. llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  1772. return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
  1773. }
// \brief Right-shift the vector \p Vec by the constant amount \p Shift,
// emitting lshr for unsigned (\p usgn) and ashr for signed element types.
// A shift amount equal to the element size -- which lshr/ashr leave
// undefined -- is special-cased below.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
                                          llvm::Type *Ty, bool usgn,
                                          const char *name) {
  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);

  int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
  int EltSize = VTy->getScalarSizeInBits();

  Vec = Builder.CreateBitCast(Vec, Ty);

  // lshr/ashr are undefined when the shift amount is equal to the vector
  // element size.
  if (ShiftAmt == EltSize) {
    if (usgn) {
      // Right-shifting an unsigned value by its size yields 0.
      llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
      return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
    } else {
      // Right-shifting a signed value by its size is equivalent
      // to a shift of size-1.
      --ShiftAmt;
      Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
    }
  }

  // Splat the (possibly adjusted) shift amount across the lanes and emit
  // the appropriate arithmetic or logical shift.
  Shift = EmitNeonShiftVector(Shift, Ty, false);
  if (usgn)
    return Builder.CreateLShr(Vec, Shift, name);
  else
    return Builder.CreateAShr(Vec, Shift, name);
}
  1802. #endif // HLSL Change Ends
  1803. /// GetPointeeAlignment - Given an expression with a pointer type, find the
  1804. /// alignment of the type referenced by the pointer. Skip over implicit
  1805. /// casts.
  1806. std::pair<llvm::Value*, unsigned>
  1807. CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
  1808. assert(Addr->getType()->isPointerType());
  1809. Addr = Addr->IgnoreParens();
  1810. if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
  1811. if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
  1812. ICE->getSubExpr()->getType()->isPointerType()) {
  1813. std::pair<llvm::Value*, unsigned> Ptr =
  1814. EmitPointerWithAlignment(ICE->getSubExpr());
  1815. Ptr.first = Builder.CreateBitCast(Ptr.first,
  1816. ConvertType(Addr->getType()));
  1817. return Ptr;
  1818. } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
  1819. LValue LV = EmitLValue(ICE->getSubExpr());
  1820. unsigned Align = LV.getAlignment().getQuantity();
  1821. if (!Align) {
  1822. // FIXME: Once LValues are fixed to always set alignment,
  1823. // zap this code.
  1824. QualType PtTy = ICE->getSubExpr()->getType();
  1825. if (!PtTy->isIncompleteType())
  1826. Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
  1827. else
  1828. Align = 1;
  1829. }
  1830. return std::make_pair(LV.getAddress(), Align);
  1831. }
  1832. }
  1833. if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
  1834. if (UO->getOpcode() == UO_AddrOf) {
  1835. LValue LV = EmitLValue(UO->getSubExpr());
  1836. unsigned Align = LV.getAlignment().getQuantity();
  1837. if (!Align) {
  1838. // FIXME: Once LValues are fixed to always set alignment,
  1839. // zap this code.
  1840. QualType PtTy = UO->getSubExpr()->getType();
  1841. if (!PtTy->isIncompleteType())
  1842. Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
  1843. else
  1844. Align = 1;
  1845. }
  1846. return std::make_pair(LV.getAddress(), Align);
  1847. }
  1848. }
  1849. unsigned Align = 1;
  1850. QualType PtTy = Addr->getType()->getPointeeType();
  1851. if (!PtTy->isIncompleteType())
  1852. Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
  1853. return std::make_pair(EmitScalarExpr(Addr), Align);
  1854. }
  1855. #if 0 // HLSL Change Starts
// Bitmask flags carried in NeonIntrinsicInfo::TypeModifier.  They describe
// how the overloaded type list for the mapped LLVM intrinsic is assembled.
// NOTE(review): the consumer of these flags is not in this chunk; the
// per-flag meanings suggested by the names should be confirmed against the
// shared Neon emission code.
enum {
  AddRetType = (1 << 0),
  Add1ArgType = (1 << 1),
  Add2ArgTypes = (1 << 2),
  VectorizeRetType = (1 << 3),
  VectorizeArgTypes = (1 << 4),
  InventFloatType = (1 << 5),
  // In NEONMAP2 entries carrying UnsignedAlts, LLVMIntrinsic is the unsigned
  // variant and AltLLVMIntrinsic the signed one (e.g. vabdu / vabds).
  UnsignedAlts = (1 << 6),
  Use64BitVectors = (1 << 7),
  Use128BitVectors = (1 << 8),

  // Common combinations of the flags above.
  Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
  VectorRet = AddRetType | VectorizeRetType,
  VectorRetGetArgs01 =
      AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
  FpCmpzModifiers =
      AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
};
/// Table entry tying one __builtin_neon_* builtin to the LLVM intrinsic(s)
/// used to lower it (see ARMSIMDIntrinsicMap / AArch64SIMDIntrinsicMap).
struct NeonIntrinsicInfo {
  unsigned BuiltinID;        // NEON::BI__builtin_neon_* identifier.
  unsigned LLVMIntrinsic;    // Primary Intrinsic::* ID; 0 for NEONMAP0 entries.
  unsigned AltLLVMIntrinsic; // Alternate Intrinsic::* ID, or 0 if none.
  const char *NameHint;      // Builtin base name, used for value naming.
  unsigned TypeModifier;     // Bitmask of the type-modifier flags.

  // Comparator against a bare BuiltinID, enabling binary search of the
  // (sorted) intrinsic maps.
  bool operator<(unsigned RHSBuiltinID) const {
    return BuiltinID < RHSBuiltinID;
  }
};
// NEONMAP0/1/2 construct NeonIntrinsicInfo initializers with zero, one, or
// two LLVM intrinsics attached.  NEONMAP0 entries (intrinsic ID 0) are
// builtins that get custom lowering rather than a direct intrinsic call.
#define NEONMAP0(NameBase) \
  { NEON::BI__builtin_neon_ ## NameBase, 0, 0, #NameBase, 0 }
#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
  { NEON:: BI__builtin_neon_ ## NameBase, \
    Intrinsic::LLVMIntrinsic, 0, #NameBase, TypeModifier }
#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
  { NEON:: BI__builtin_neon_ ## NameBase, \
    Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
    #NameBase, TypeModifier }
// Map from ARM NEON builtins to the LLVM intrinsic(s) implementing them.
// NOTE(review): NeonIntrinsicInfo::operator< supports binary search by
// BuiltinID, so this table must stay sorted by builtin name/ID -- keep new
// entries in order.
static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
  NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vabs_v, arm_neon_vabs, 0),
  NEONMAP1(vabsq_v, arm_neon_vabs, 0),
  NEONMAP0(vaddhn_v),
  NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
  NEONMAP1(vaeseq_v, arm_neon_aese, 0),
  NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
  NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
  NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vcage_v, arm_neon_vacge, 0),
  NEONMAP1(vcageq_v, arm_neon_vacge, 0),
  NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcale_v, arm_neon_vacge, 0),
  NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
  NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
  NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_v, arm_neon_vcvtfp2hf, 0),
  NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvt_s32_v),
  NEONMAP0(vcvt_s64_v),
  NEONMAP0(vcvt_u32_v),
  NEONMAP0(vcvt_u64_v),
  NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_s32_v),
  NEONMAP0(vcvtq_s64_v),
  NEONMAP0(vcvtq_u32_v),
  NEONMAP0(vcvtq_u64_v),
  NEONMAP0(vext_v),
  NEONMAP0(vextq_v),
  NEONMAP0(vfma_v),
  NEONMAP0(vfmaq_v),
  NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP0(vld1_dup_v),
  NEONMAP1(vld1_v, arm_neon_vld1, 0),
  NEONMAP0(vld1q_dup_v),
  NEONMAP1(vld1q_v, arm_neon_vld1, 0),
  NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2_v, arm_neon_vld2, 0),
  NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2q_v, arm_neon_vld2, 0),
  NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3_v, arm_neon_vld3, 0),
  NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3q_v, arm_neon_vld3, 0),
  NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4_v, arm_neon_vld4, 0),
  NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4q_v, arm_neon_vld4, 0),
  NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP0(vmovl_v),
  NEONMAP0(vmovn_v),
  NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP0(vmull_v),
  NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
  NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
  NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
  NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
  NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
  NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
  NEONMAP0(vshl_n_v),
  NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshr_n_v),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP1(vst1_v, arm_neon_vst1, 0),
  NEONMAP1(vst1q_v, arm_neon_vst1, 0),
  NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2_v, arm_neon_vst2, 0),
  NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2q_v, arm_neon_vst2, 0),
  NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3_v, arm_neon_vst3, 0),
  NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3q_v, arm_neon_vst3, 0),
  NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4_v, arm_neon_vst4, 0),
  NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4q_v, arm_neon_vst4, 0),
  NEONMAP0(vsubhn_v),
  NEONMAP0(vtrn_v),
  NEONMAP0(vtrnq_v),
  NEONMAP0(vtst_v),
  NEONMAP0(vtstq_v),
  NEONMAP0(vuzp_v),
  NEONMAP0(vuzpq_v),
  NEONMAP0(vzip_v),
  NEONMAP0(vzipq_v)
};
  2108. static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
  2109. NEONMAP1(vabs_v, aarch64_neon_abs, 0),
  2110. NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
  2111. NEONMAP0(vaddhn_v),
  2112. NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
  2113. NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
  2114. NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
  2115. NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
  2116. NEONMAP1(vcage_v, aarch64_neon_facge, 0),
  2117. NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
  2118. NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
  2119. NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
  2120. NEONMAP1(vcale_v, aarch64_neon_facge, 0),
  2121. NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
  2122. NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
  2123. NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
  2124. NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
  2125. NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
  2126. NEONMAP1(vclz_v, ctlz, Add1ArgType),
  2127. NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  2128. NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  2129. NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  2130. NEONMAP1(vcvt_f16_v, aarch64_neon_vcvtfp2hf, 0),
  2131. NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
  2132. NEONMAP0(vcvt_f32_v),
  2133. NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  2134. NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  2135. NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  2136. NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  2137. NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  2138. NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  2139. NEONMAP0(vcvtq_f32_v),
  2140. NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  2141. NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  2142. NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  2143. NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  2144. NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  2145. NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  2146. NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
  2147. NEONMAP0(vext_v),
  2148. NEONMAP0(vextq_v),
  2149. NEONMAP0(vfma_v),
  2150. NEONMAP0(vfmaq_v),
  2151. NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  2152. NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  2153. NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  2154. NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  2155. NEONMAP0(vmovl_v),
  2156. NEONMAP0(vmovn_v),
  2157. NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
  2158. NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
  2159. NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
  2160. NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  2161. NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  2162. NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
  2163. NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
  2164. NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
  2165. NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  2166. NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  2167. NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
  2168. NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
  2169. NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
  2170. NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
  2171. NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
  2172. NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
  2173. NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
  2174. NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
  2175. NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
  2176. NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
  2177. NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
  2178. NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  2179. NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  2180. NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  2181. NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  2182. NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts),
  2183. NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  2184. NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
  2185. NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
  2186. NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  2187. NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  2188. NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
  2189. NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  2190. NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  2191. NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
  2192. NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
  2193. NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  2194. NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  2195. NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  2196. NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  2197. NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  2198. NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  2199. NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  2200. NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  2201. NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
  2202. NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
  2203. NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
  2204. NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
  2205. NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
  2206. NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
  2207. NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
  2208. NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
  2209. NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
  2210. NEONMAP0(vshl_n_v),
  2211. NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  2212. NEONMAP0(vshll_n_v),
  2213. NEONMAP0(vshlq_n_v),
  2214. NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  2215. NEONMAP0(vshr_n_v),
  2216. NEONMAP0(vshrn_n_v),
  2217. NEONMAP0(vshrq_n_v),
  2218. NEONMAP0(vsubhn_v),
  2219. NEONMAP0(vtst_v),
  2220. NEONMAP0(vtstq_v),
  2221. };
// Mapping from AArch64 scalar (SISD) NEON builtins to the LLVM intrinsics
// (and type modifiers) used to emit them.
// NOTE: entries must stay sorted by BuiltinID — findNeonIntrinsicInMap
// looks them up with std::lower_bound and asserts sortedness in +Asserts
// builds.
static NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
  NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
  NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  // vcale/vcalt map to the same facge/facgt intrinsics as vcage/vcagt;
  // EmitCommonNeonSISDBuiltinExpr swaps the operands for the "le"/"lt" forms.
  NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
  NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
  NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
  NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
  NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
  NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
  NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
  NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
  NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
  NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
  NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
  NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
  NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
  NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
  NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
  NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
  NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
  NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
  NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
  NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
  NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
  NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
};
#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2

// One flag per intrinsic table above.  findNeonIntrinsicInMap sets the flag
// after it has verified (in +Asserts builds) that the corresponding table is
// sorted by BuiltinID, so the check runs at most once per table.
static bool NEONSIMDIntrinsicsProvenSorted = false;

static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
  2422. static const NeonIntrinsicInfo *
  2423. findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
  2424. unsigned BuiltinID, bool &MapProvenSorted) {
  2425. #ifndef NDEBUG
  2426. if (!MapProvenSorted) {
  2427. // FIXME: use std::is_sorted once C++11 is allowed
  2428. for (unsigned i = 0; i < IntrinsicMap.size() - 1; ++i)
  2429. assert(IntrinsicMap[i].BuiltinID <= IntrinsicMap[i + 1].BuiltinID);
  2430. MapProvenSorted = true;
  2431. }
  2432. #endif
  2433. const NeonIntrinsicInfo *Builtin =
  2434. std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);
  2435. if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
  2436. return Builtin;
  2437. return nullptr;
  2438. }
// Retrieve the declaration of the (possibly overloaded) LLVM intrinsic
// \p IntrinsicID, computing its overload type list from the type-modifier
// bitmask \p Modifier (AddRetType, Add1ArgType, Use64BitVectors, etc.),
// the builtin's argument type \p ArgType, and the call expression \p E
// (used only for its return type).
Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                                   unsigned Modifier,
                                                   llvm::Type *ArgType,
                                                   const CallExpr *E) {
  // Width of the vector types to build when a scalar type must be
  // re-vectorized; 0 means "single-element vector" (see divisions below).
  int VectorSize = 0;
  if (Modifier & Use64BitVectors)
    VectorSize = 64;
  else if (Modifier & Use128BitVectors)
    VectorSize = 128;

  // Return type.
  SmallVector<llvm::Type *, 3> Tys;
  if (Modifier & AddRetType) {
    llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
    if (Modifier & VectorizeRetType)
      // Element count derived from the requested vector width; a width of 0
      // yields a 1-element vector of the scalar return type.
      Ty = llvm::VectorType::get(
          Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);

    Tys.push_back(Ty);
  }

  // Arguments.
  if (Modifier & VectorizeArgTypes) {
    int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
    ArgType = llvm::VectorType::get(ArgType, Elts);
  }

  // Add1ArgType appends the argument type once; Add2ArgTypes appends it
  // twice (note the first push covers both flags).
  if (Modifier & (Add1ArgType | Add2ArgTypes))
    Tys.push_back(ArgType);

  if (Modifier & Add2ArgTypes)
    Tys.push_back(ArgType);

  if (Modifier & InventFloatType)
    Tys.push_back(FloatTy);

  return CGM.getIntrinsic(IntrinsicID, Tys);
}
// Emit IR for a scalar (SISD) NEON builtin using its table entry
// \p SISDInfo: look up the LLVM intrinsic, adapt the scalar operands in
// \p Ops to the intrinsic's (possibly vector) parameter types, emit the
// call, and narrow/bitcast the result back to the builtin's return type.
static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
                                            const NeonIntrinsicInfo &SISDInfo,
                                            SmallVectorImpl<Value *> &Ops,
                                            const CallExpr *E) {
  unsigned BuiltinID = SISDInfo.BuiltinID;
  unsigned int Int = SISDInfo.LLVMIntrinsic;
  unsigned Modifier = SISDInfo.TypeModifier;
  const char *s = SISDInfo.NameHint;

  switch (BuiltinID) {
  case NEON::BI__builtin_neon_vcled_s64:
  case NEON::BI__builtin_neon_vcled_u64:
  case NEON::BI__builtin_neon_vcles_f32:
  case NEON::BI__builtin_neon_vcled_f64:
  case NEON::BI__builtin_neon_vcltd_s64:
  case NEON::BI__builtin_neon_vcltd_u64:
  case NEON::BI__builtin_neon_vclts_f32:
  case NEON::BI__builtin_neon_vcltd_f64:
  case NEON::BI__builtin_neon_vcales_f32:
  case NEON::BI__builtin_neon_vcaled_f64:
  case NEON::BI__builtin_neon_vcalts_f32:
  case NEON::BI__builtin_neon_vcaltd_f64:
    // Only one direction of comparisons actually exist, cmle is actually a
    // cmge with swapped operands. The table gives us the right intrinsic but
    // we still need to do the swap.
    std::swap(Ops[0], Ops[1]);
    break;
  }

  assert(Int && "Generic code assumes a valid intrinsic");

  // Determine the type(s) of this overloaded AArch64 intrinsic.
  const Expr *Arg = E->getArg(0);
  llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
  Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);

  int j = 0;
  ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
  // Walk the intrinsic's parameters in parallel with Ops.  Any operand
  // narrower than its parameter must be a scalar destined for a vector
  // parameter: convert it to the element type and insert into lane 0 of an
  // undef vector.
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j) {
    llvm::Type *ArgTy = ai->getType(); // NOTE: intentionally shadows outer ArgTy.
    if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
        ArgTy->getPrimitiveSizeInBits())
      continue;

    assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
    // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
    // it before inserting.
    Ops[j] =
        CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
    Ops[j] =
        CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
  }

  Value *Result = CGF.EmitNeonCall(F, Ops, s);
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  // If the intrinsic returned a wider (vector) value than the builtin's
  // scalar return type, extract lane 0; otherwise a bitcast suffices.
  if (ResultType->getPrimitiveSizeInBits() <
      Result->getType()->getPrimitiveSizeInBits())
    return CGF.Builder.CreateExtractElement(Result, C0);

  return CGF.Builder.CreateBitCast(Result, ResultType, s);
}
  2525. Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
  2526. unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
  2527. const char *NameHint, unsigned Modifier, const CallExpr *E,
  2528. SmallVectorImpl<llvm::Value *> &Ops, llvm::Value *Align) {
  2529. // Get the last argument, which specifies the vector type.
  2530. llvm::APSInt NeonTypeConst;
  2531. const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  2532. if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
  2533. return nullptr;
  2534. // Determine the type of this overloaded NEON intrinsic.
  2535. NeonTypeFlags Type(NeonTypeConst.getZExtValue());
  2536. bool Usgn = Type.isUnsigned();
  2537. bool Quad = Type.isQuad();
  2538. llvm::VectorType *VTy = GetNeonType(this, Type);
  2539. llvm::Type *Ty = VTy;
  2540. if (!Ty)
  2541. return nullptr;
  2542. unsigned Int = LLVMIntrinsic;
  2543. if ((Modifier & UnsignedAlts) && !Usgn)
  2544. Int = AltLLVMIntrinsic;
  2545. switch (BuiltinID) {
  2546. default: break;
  2547. case NEON::BI__builtin_neon_vabs_v:
  2548. case NEON::BI__builtin_neon_vabsq_v:
  2549. if (VTy->getElementType()->isFloatingPointTy())
  2550. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
  2551. return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
  2552. case NEON::BI__builtin_neon_vaddhn_v: {
  2553. llvm::VectorType *SrcTy =
  2554. llvm::VectorType::getExtendedElementVectorType(VTy);
  2555. // %sum = add <4 x i32> %lhs, %rhs
  2556. Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
  2557. Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
  2558. Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
  2559. // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
  2560. Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
  2561. SrcTy->getScalarSizeInBits() / 2);
  2562. ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
  2563. Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
  2564. // %res = trunc <4 x i32> %high to <4 x i16>
  2565. return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
  2566. }
  2567. case NEON::BI__builtin_neon_vcale_v:
  2568. case NEON::BI__builtin_neon_vcaleq_v:
  2569. case NEON::BI__builtin_neon_vcalt_v:
  2570. case NEON::BI__builtin_neon_vcaltq_v:
  2571. std::swap(Ops[0], Ops[1]);
  2572. case NEON::BI__builtin_neon_vcage_v:
  2573. case NEON::BI__builtin_neon_vcageq_v:
  2574. case NEON::BI__builtin_neon_vcagt_v:
  2575. case NEON::BI__builtin_neon_vcagtq_v: {
  2576. llvm::Type *VecFlt = llvm::VectorType::get(
  2577. VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy,
  2578. VTy->getNumElements());
  2579. llvm::Type *Tys[] = { VTy, VecFlt };
  2580. Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
  2581. return EmitNeonCall(F, Ops, NameHint);
  2582. }
  2583. case NEON::BI__builtin_neon_vclz_v:
  2584. case NEON::BI__builtin_neon_vclzq_v:
  2585. // We generate target-independent intrinsic, which needs a second argument
  2586. // for whether or not clz of zero is undefined; on ARM it isn't.
  2587. Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
  2588. break;
  2589. case NEON::BI__builtin_neon_vcvt_f32_v:
  2590. case NEON::BI__builtin_neon_vcvtq_f32_v:
  2591. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  2592. Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
  2593. return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
  2594. : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  2595. case NEON::BI__builtin_neon_vcvt_n_f32_v:
  2596. case NEON::BI__builtin_neon_vcvt_n_f64_v:
  2597. case NEON::BI__builtin_neon_vcvtq_n_f32_v:
  2598. case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
  2599. bool Double =
  2600. (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
  2601. llvm::Type *FloatTy =
  2602. GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
  2603. : NeonTypeFlags::Float32,
  2604. false, Quad));
  2605. llvm::Type *Tys[2] = { FloatTy, Ty };
  2606. Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
  2607. Function *F = CGM.getIntrinsic(Int, Tys);
  2608. return EmitNeonCall(F, Ops, "vcvt_n");
  2609. }
  2610. case NEON::BI__builtin_neon_vcvt_n_s32_v:
  2611. case NEON::BI__builtin_neon_vcvt_n_u32_v:
  2612. case NEON::BI__builtin_neon_vcvt_n_s64_v:
  2613. case NEON::BI__builtin_neon_vcvt_n_u64_v:
  2614. case NEON::BI__builtin_neon_vcvtq_n_s32_v:
  2615. case NEON::BI__builtin_neon_vcvtq_n_u32_v:
  2616. case NEON::BI__builtin_neon_vcvtq_n_s64_v:
  2617. case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
  2618. bool Double =
  2619. (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
  2620. llvm::Type *FloatTy =
  2621. GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
  2622. : NeonTypeFlags::Float32,
  2623. false, Quad));
  2624. llvm::Type *Tys[2] = { Ty, FloatTy };
  2625. Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
  2626. return EmitNeonCall(F, Ops, "vcvt_n");
  2627. }
  2628. case NEON::BI__builtin_neon_vcvt_s32_v:
  2629. case NEON::BI__builtin_neon_vcvt_u32_v:
  2630. case NEON::BI__builtin_neon_vcvt_s64_v:
  2631. case NEON::BI__builtin_neon_vcvt_u64_v:
  2632. case NEON::BI__builtin_neon_vcvtq_s32_v:
  2633. case NEON::BI__builtin_neon_vcvtq_u32_v:
  2634. case NEON::BI__builtin_neon_vcvtq_s64_v:
  2635. case NEON::BI__builtin_neon_vcvtq_u64_v: {
  2636. bool Double =
  2637. (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
  2638. llvm::Type *FloatTy =
  2639. GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
  2640. : NeonTypeFlags::Float32,
  2641. false, Quad));
  2642. Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
  2643. return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
  2644. : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  2645. }
  2646. case NEON::BI__builtin_neon_vcvta_s32_v:
  2647. case NEON::BI__builtin_neon_vcvta_s64_v:
  2648. case NEON::BI__builtin_neon_vcvta_u32_v:
  2649. case NEON::BI__builtin_neon_vcvta_u64_v:
  2650. case NEON::BI__builtin_neon_vcvtaq_s32_v:
  2651. case NEON::BI__builtin_neon_vcvtaq_s64_v:
  2652. case NEON::BI__builtin_neon_vcvtaq_u32_v:
  2653. case NEON::BI__builtin_neon_vcvtaq_u64_v:
  2654. case NEON::BI__builtin_neon_vcvtn_s32_v:
  2655. case NEON::BI__builtin_neon_vcvtn_s64_v:
  2656. case NEON::BI__builtin_neon_vcvtn_u32_v:
  2657. case NEON::BI__builtin_neon_vcvtn_u64_v:
  2658. case NEON::BI__builtin_neon_vcvtnq_s32_v:
  2659. case NEON::BI__builtin_neon_vcvtnq_s64_v:
  2660. case NEON::BI__builtin_neon_vcvtnq_u32_v:
  2661. case NEON::BI__builtin_neon_vcvtnq_u64_v:
  2662. case NEON::BI__builtin_neon_vcvtp_s32_v:
  2663. case NEON::BI__builtin_neon_vcvtp_s64_v:
  2664. case NEON::BI__builtin_neon_vcvtp_u32_v:
  2665. case NEON::BI__builtin_neon_vcvtp_u64_v:
  2666. case NEON::BI__builtin_neon_vcvtpq_s32_v:
  2667. case NEON::BI__builtin_neon_vcvtpq_s64_v:
  2668. case NEON::BI__builtin_neon_vcvtpq_u32_v:
  2669. case NEON::BI__builtin_neon_vcvtpq_u64_v:
  2670. case NEON::BI__builtin_neon_vcvtm_s32_v:
  2671. case NEON::BI__builtin_neon_vcvtm_s64_v:
  2672. case NEON::BI__builtin_neon_vcvtm_u32_v:
  2673. case NEON::BI__builtin_neon_vcvtm_u64_v:
  2674. case NEON::BI__builtin_neon_vcvtmq_s32_v:
  2675. case NEON::BI__builtin_neon_vcvtmq_s64_v:
  2676. case NEON::BI__builtin_neon_vcvtmq_u32_v:
  2677. case NEON::BI__builtin_neon_vcvtmq_u64_v: {
  2678. bool Double =
  2679. (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
  2680. llvm::Type *InTy =
  2681. GetNeonType(this,
  2682. NeonTypeFlags(Double ? NeonTypeFlags::Float64
  2683. : NeonTypeFlags::Float32, false, Quad));
  2684. llvm::Type *Tys[2] = { Ty, InTy };
  2685. return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  2686. }
  2687. case NEON::BI__builtin_neon_vext_v:
  2688. case NEON::BI__builtin_neon_vextq_v: {
  2689. int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
  2690. SmallVector<Constant*, 16> Indices;
  2691. for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
  2692. Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
  2693. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  2694. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  2695. Value *SV = llvm::ConstantVector::get(Indices);
  2696. return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
  2697. }
  2698. case NEON::BI__builtin_neon_vfma_v:
  2699. case NEON::BI__builtin_neon_vfmaq_v: {
  2700. Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
  2701. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  2702. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  2703. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  2704. // NEON intrinsic puts accumulator first, unlike the LLVM fma.
  2705. return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
  2706. }
  2707. case NEON::BI__builtin_neon_vld1_v:
  2708. case NEON::BI__builtin_neon_vld1q_v:
  2709. Ops.push_back(Align);
  2710. return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vld1");
  2711. case NEON::BI__builtin_neon_vld2_v:
  2712. case NEON::BI__builtin_neon_vld2q_v:
  2713. case NEON::BI__builtin_neon_vld3_v:
  2714. case NEON::BI__builtin_neon_vld3q_v:
  2715. case NEON::BI__builtin_neon_vld4_v:
  2716. case NEON::BI__builtin_neon_vld4q_v: {
  2717. Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
  2718. Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
  2719. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  2720. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  2721. return Builder.CreateStore(Ops[1], Ops[0]);
  2722. }
  2723. case NEON::BI__builtin_neon_vld1_dup_v:
  2724. case NEON::BI__builtin_neon_vld1q_dup_v: {
  2725. Value *V = UndefValue::get(Ty);
  2726. Ty = llvm::PointerType::getUnqual(VTy->getElementType());
  2727. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  2728. LoadInst *Ld = Builder.CreateLoad(Ops[0]);
  2729. Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
  2730. llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
  2731. Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
  2732. return EmitNeonSplat(Ops[0], CI);
  2733. }
  2734. case NEON::BI__builtin_neon_vld2_lane_v:
  2735. case NEON::BI__builtin_neon_vld2q_lane_v:
  2736. case NEON::BI__builtin_neon_vld3_lane_v:
  2737. case NEON::BI__builtin_neon_vld3q_lane_v:
  2738. case NEON::BI__builtin_neon_vld4_lane_v:
  2739. case NEON::BI__builtin_neon_vld4q_lane_v: {
  2740. Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
  2741. for (unsigned I = 2; I < Ops.size() - 1; ++I)
  2742. Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
  2743. Ops.push_back(Align);
  2744. Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
  2745. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  2746. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  2747. return Builder.CreateStore(Ops[1], Ops[0]);
  2748. }
  2749. case NEON::BI__builtin_neon_vmovl_v: {
  2750. llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
  2751. Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
  2752. if (Usgn)
  2753. return Builder.CreateZExt(Ops[0], Ty, "vmovl");
  2754. return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  2755. }
  2756. case NEON::BI__builtin_neon_vmovn_v: {
  2757. llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
  2758. Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
  2759. return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  2760. }
  2761. case NEON::BI__builtin_neon_vmull_v:
  2762. // FIXME: the integer vmull operations could be emitted in terms of pure
  2763. // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
  2764. // hoisting the exts outside loops. Until global ISel comes along that can
  2765. // see through such movement this leads to bad CodeGen. So we need an
  2766. // intrinsic for now.
  2767. Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
  2768. Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
  2769. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  2770. case NEON::BI__builtin_neon_vpadal_v:
  2771. case NEON::BI__builtin_neon_vpadalq_v: {
  2772. // The source operand type has twice as many elements of half the size.
  2773. unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
  2774. llvm::Type *EltTy =
  2775. llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
  2776. llvm::Type *NarrowTy =
  2777. llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
  2778. llvm::Type *Tys[2] = { Ty, NarrowTy };
  2779. return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
  2780. }
  2781. case NEON::BI__builtin_neon_vpaddl_v:
  2782. case NEON::BI__builtin_neon_vpaddlq_v: {
  2783. // The source operand type has twice as many elements of half the size.
  2784. unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
  2785. llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
  2786. llvm::Type *NarrowTy =
  2787. llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
  2788. llvm::Type *Tys[2] = { Ty, NarrowTy };
  2789. return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
  2790. }
  2791. case NEON::BI__builtin_neon_vqdmlal_v:
  2792. case NEON::BI__builtin_neon_vqdmlsl_v: {
  2793. SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
  2794. Value *Mul = EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty),
  2795. MulOps, "vqdmlal");
  2796. SmallVector<Value *, 2> AccumOps;
  2797. AccumOps.push_back(Ops[0]);
  2798. AccumOps.push_back(Mul);
  2799. return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty),
  2800. AccumOps, NameHint);
  2801. }
  2802. case NEON::BI__builtin_neon_vqshl_n_v:
  2803. case NEON::BI__builtin_neon_vqshlq_n_v:
  2804. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
  2805. 1, false);
  2806. case NEON::BI__builtin_neon_vqshlu_n_v:
  2807. case NEON::BI__builtin_neon_vqshluq_n_v:
  2808. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
  2809. 1, false);
  2810. case NEON::BI__builtin_neon_vrecpe_v:
  2811. case NEON::BI__builtin_neon_vrecpeq_v:
  2812. case NEON::BI__builtin_neon_vrsqrte_v:
  2813. case NEON::BI__builtin_neon_vrsqrteq_v:
  2814. Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
  2815. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
  2816. case NEON::BI__builtin_neon_vrshr_n_v:
  2817. case NEON::BI__builtin_neon_vrshrq_n_v:
  2818. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
  2819. 1, true);
  2820. case NEON::BI__builtin_neon_vshl_n_v:
  2821. case NEON::BI__builtin_neon_vshlq_n_v:
  2822. Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
  2823. return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
  2824. "vshl_n");
  2825. case NEON::BI__builtin_neon_vshll_n_v: {
  2826. llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
  2827. Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
  2828. if (Usgn)
  2829. Ops[0] = Builder.CreateZExt(Ops[0], VTy);
  2830. else
  2831. Ops[0] = Builder.CreateSExt(Ops[0], VTy);
  2832. Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
  2833. return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
  2834. }
  2835. case NEON::BI__builtin_neon_vshrn_n_v: {
  2836. llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
  2837. Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
  2838. Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
  2839. if (Usgn)
  2840. Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
  2841. else
  2842. Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
  2843. return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
  2844. }
  2845. case NEON::BI__builtin_neon_vshr_n_v:
  2846. case NEON::BI__builtin_neon_vshrq_n_v:
  2847. return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
  2848. case NEON::BI__builtin_neon_vst1_v:
  2849. case NEON::BI__builtin_neon_vst1q_v:
  2850. case NEON::BI__builtin_neon_vst2_v:
  2851. case NEON::BI__builtin_neon_vst2q_v:
  2852. case NEON::BI__builtin_neon_vst3_v:
  2853. case NEON::BI__builtin_neon_vst3q_v:
  2854. case NEON::BI__builtin_neon_vst4_v:
  2855. case NEON::BI__builtin_neon_vst4q_v:
  2856. case NEON::BI__builtin_neon_vst2_lane_v:
  2857. case NEON::BI__builtin_neon_vst2q_lane_v:
  2858. case NEON::BI__builtin_neon_vst3_lane_v:
  2859. case NEON::BI__builtin_neon_vst3q_lane_v:
  2860. case NEON::BI__builtin_neon_vst4_lane_v:
  2861. case NEON::BI__builtin_neon_vst4q_lane_v:
  2862. Ops.push_back(Align);
  2863. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
  2864. case NEON::BI__builtin_neon_vsubhn_v: {
  2865. llvm::VectorType *SrcTy =
  2866. llvm::VectorType::getExtendedElementVectorType(VTy);
  2867. // %sum = add <4 x i32> %lhs, %rhs
  2868. Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
  2869. Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
  2870. Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
  2871. // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
  2872. Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
  2873. SrcTy->getScalarSizeInBits() / 2);
  2874. ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
  2875. Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
  2876. // %res = trunc <4 x i32> %high to <4 x i16>
  2877. return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
  2878. }
  2879. case NEON::BI__builtin_neon_vtrn_v:
  2880. case NEON::BI__builtin_neon_vtrnq_v: {
  2881. Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
  2882. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  2883. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  2884. Value *SV = nullptr;
  2885. for (unsigned vi = 0; vi != 2; ++vi) {
  2886. SmallVector<Constant*, 16> Indices;
  2887. for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
  2888. Indices.push_back(Builder.getInt32(i+vi));
  2889. Indices.push_back(Builder.getInt32(i+e+vi));
  2890. }
  2891. Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
  2892. SV = llvm::ConstantVector::get(Indices);
  2893. SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
  2894. SV = Builder.CreateStore(SV, Addr);
  2895. }
  2896. return SV;
  2897. }
  2898. case NEON::BI__builtin_neon_vtst_v:
  2899. case NEON::BI__builtin_neon_vtstq_v: {
  2900. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  2901. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  2902. Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
  2903. Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
  2904. ConstantAggregateZero::get(Ty));
  2905. return Builder.CreateSExt(Ops[0], Ty, "vtst");
  2906. }
  2907. case NEON::BI__builtin_neon_vuzp_v:
  2908. case NEON::BI__builtin_neon_vuzpq_v: {
  2909. Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
  2910. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  2911. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  2912. Value *SV = nullptr;
  2913. for (unsigned vi = 0; vi != 2; ++vi) {
  2914. SmallVector<Constant*, 16> Indices;
  2915. for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
  2916. Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
  2917. Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
  2918. SV = llvm::ConstantVector::get(Indices);
  2919. SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
  2920. SV = Builder.CreateStore(SV, Addr);
  2921. }
  2922. return SV;
  2923. }
  2924. case NEON::BI__builtin_neon_vzip_v:
  2925. case NEON::BI__builtin_neon_vzipq_v: {
  2926. Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
  2927. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  2928. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  2929. Value *SV = nullptr;
  2930. for (unsigned vi = 0; vi != 2; ++vi) {
  2931. SmallVector<Constant*, 16> Indices;
  2932. for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
  2933. Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
  2934. Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
  2935. }
  2936. Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
  2937. SV = llvm::ConstantVector::get(Indices);
  2938. SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
  2939. SV = Builder.CreateStore(SV, Addr);
  2940. }
  2941. return SV;
  2942. }
  2943. }
  2944. assert(Int && "Expected valid intrinsic number");
  2945. // Determine the type(s) of this overloaded AArch64 intrinsic.
  2946. Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
  2947. Value *Result = EmitNeonCall(F, Ops, NameHint);
  2948. llvm::Type *ResultType = ConvertType(E->getType());
  2949. // AArch64 intrinsic one-element vector type cast to
  2950. // scalar type expected by the builtin
  2951. return Builder.CreateBitCast(Result, ResultType, NameHint);
  2952. }
  2953. Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
  2954. Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
  2955. const CmpInst::Predicate Ip, const Twine &Name) {
  2956. llvm::Type *OTy = Op->getType();
  2957. // FIXME: this is utterly horrific. We should not be looking at previous
  2958. // codegen context to find out what needs doing. Unfortunately TableGen
  2959. // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
  2960. // (etc).
  2961. if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
  2962. OTy = BI->getOperand(0)->getType();
  2963. Op = Builder.CreateBitCast(Op, OTy);
  2964. if (OTy->getScalarType()->isFloatingPointTy()) {
  2965. Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
  2966. } else {
  2967. Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
  2968. }
  2969. return Builder.CreateSExt(Op, Ty, Name);
  2970. }
/// Pack a list of NEON table-register operands and emit the table-lookup
/// intrinsic call for it.
///
/// \param Ops     the table vectors, followed by one trailing operand that is
///                NOT part of the table (only Ops[0..size-2] are packed).
/// \param ExtOp   optional extra leading operand (e.g. the "fallback" vector
///                of a TBX-style lookup); prepended to the call if non-null.
/// \param IndexOp the index vector, appended as the last call operand.
/// \param ResTy   overload type for the intrinsic.
/// \param IntID   intrinsic ID to call.
/// \param Name    name hint for the emitted instructions.
static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                                 Value *ExtOp, Value *IndexOp,
                                 llvm::Type *ResTy, unsigned IntID,
                                 const char *Name) {
  SmallVector<Value *, 2> TblOps;
  if (ExtOp)
    TblOps.push_back(ExtOp);

  // Build a vector containing sequential number like (0, 1, 2, ..., 15)
  // Shuffling two e-element vectors with these 2*e indices concatenates them
  // into one double-width vector.
  SmallVector<Constant*, 16> Indices;
  llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
  for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
    Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i));
    Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i+1));
  }
  Value *SV = llvm::ConstantVector::get(Indices);

  // Concatenate the table operands pairwise into double-width vectors.
  // NOTE(review): assumes each table operand is a 64-bit vector so a pair
  // forms one 128-bit lookup register — confirm against callers.
  int PairPos = 0, End = Ops.size() - 1;
  while (PairPos < End) {
    TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
                                                     Ops[PairPos+1], SV, Name));
    PairPos += 2;
  }

  // If there's an odd number of 64-bit lookup table, fill the high 64-bit
  // of the 128-bit lookup table with zero.
  if (PairPos == End) {
    Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
    TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
                                                     ZeroTbl, SV, Name));
  }

  // The index operand always goes last.
  Function *TblF;
  TblOps.push_back(IndexOp);
  TblF = CGF.CGM.getIntrinsic(IntID, ResTy);

  return CGF.EmitNeonCall(TblF, TblOps, Name);
}
  3004. Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
  3005. switch (BuiltinID) {
  3006. default:
  3007. return nullptr;
  3008. case ARM::BI__builtin_arm_nop:
  3009. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
  3010. llvm::ConstantInt::get(Int32Ty, 0));
  3011. case ARM::BI__builtin_arm_yield:
  3012. case ARM::BI__yield:
  3013. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
  3014. llvm::ConstantInt::get(Int32Ty, 1));
  3015. case ARM::BI__builtin_arm_wfe:
  3016. case ARM::BI__wfe:
  3017. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
  3018. llvm::ConstantInt::get(Int32Ty, 2));
  3019. case ARM::BI__builtin_arm_wfi:
  3020. case ARM::BI__wfi:
  3021. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
  3022. llvm::ConstantInt::get(Int32Ty, 3));
  3023. case ARM::BI__builtin_arm_sev:
  3024. case ARM::BI__sev:
  3025. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
  3026. llvm::ConstantInt::get(Int32Ty, 4));
  3027. case ARM::BI__builtin_arm_sevl:
  3028. case ARM::BI__sevl:
  3029. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
  3030. llvm::ConstantInt::get(Int32Ty, 5));
  3031. }
  3032. }
// Generates the IR for the read/write special register builtin,
// ValueType is the type of the value that is to be written or read,
// RegisterType is the type of the register being written to or read from.
//
// The register name comes from the builtin's first argument, which must be a
// string literal; it is passed to the llvm.read_register / llvm.write_register
// intrinsics as metadata. When the register is 64-bit but the value is 32-bit
// (MixedTypes), the value is truncated after a read and zero-extended before a
// write; pointer values are converted via inttoptr/ptrtoint.
static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
                                         const CallExpr *E,
                                         llvm::Type *RegisterType,
                                         llvm::Type *ValueType, bool IsRead) {
  // write and register intrinsics only support 32 and 64 bit operations.
  assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
          && "Unsupported size for register.");

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  CodeGen::CodeGenModule &CGM = CGF.CGM;
  LLVMContext &Context = CGM.getLLVMContext();

  // Arg(0) is the register name as a string literal; Sema is presumed to have
  // validated this (we only strip parens/casts here).
  const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
  StringRef SysReg = cast<StringLiteral>(SysRegStrExpr)->getString();

  // The intrinsics identify the register by an MDString wrapped in an MDNode.
  llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
  llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
  llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);

  llvm::Type *Types[] = { RegisterType };

  bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
  assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
            && "Can't fit 64-bit value in 32-bit register");

  if (IsRead) {
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
    llvm::Value *Call = Builder.CreateCall(F, Metadata);

    if (MixedTypes)
      // Read into 64 bit register and then truncate result to 32 bit.
      return Builder.CreateTrunc(Call, ValueType);

    if (ValueType->isPointerTy())
      // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
      return Builder.CreateIntToPtr(Call, ValueType);

    return Call;
  }

  // Write path: the value to store is the builtin's second argument.
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
  llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
  if (MixedTypes) {
    // Extend 32 bit write value to 64 bit to pass to write.
    ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
    return Builder.CreateCall(F, { Metadata, ArgValue });
  }

  if (ValueType->isPointerTy()) {
    // Have VoidPtrTy ArgValue but want to return an i32/i64.
    ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
    return Builder.CreateCall(F, { Metadata, ArgValue });
  }

  return Builder.CreateCall(F, { Metadata, ArgValue });
}
  3080. /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
  3081. /// argument that specifies the vector type.
  3082. static bool HasExtraNeonArgument(unsigned BuiltinID) {
  3083. switch (BuiltinID) {
  3084. default: break;
  3085. case NEON::BI__builtin_neon_vget_lane_i8:
  3086. case NEON::BI__builtin_neon_vget_lane_i16:
  3087. case NEON::BI__builtin_neon_vget_lane_i32:
  3088. case NEON::BI__builtin_neon_vget_lane_i64:
  3089. case NEON::BI__builtin_neon_vget_lane_f32:
  3090. case NEON::BI__builtin_neon_vgetq_lane_i8:
  3091. case NEON::BI__builtin_neon_vgetq_lane_i16:
  3092. case NEON::BI__builtin_neon_vgetq_lane_i32:
  3093. case NEON::BI__builtin_neon_vgetq_lane_i64:
  3094. case NEON::BI__builtin_neon_vgetq_lane_f32:
  3095. case NEON::BI__builtin_neon_vset_lane_i8:
  3096. case NEON::BI__builtin_neon_vset_lane_i16:
  3097. case NEON::BI__builtin_neon_vset_lane_i32:
  3098. case NEON::BI__builtin_neon_vset_lane_i64:
  3099. case NEON::BI__builtin_neon_vset_lane_f32:
  3100. case NEON::BI__builtin_neon_vsetq_lane_i8:
  3101. case NEON::BI__builtin_neon_vsetq_lane_i16:
  3102. case NEON::BI__builtin_neon_vsetq_lane_i32:
  3103. case NEON::BI__builtin_neon_vsetq_lane_i64:
  3104. case NEON::BI__builtin_neon_vsetq_lane_f32:
  3105. case NEON::BI__builtin_neon_vsha1h_u32:
  3106. case NEON::BI__builtin_neon_vsha1cq_u32:
  3107. case NEON::BI__builtin_neon_vsha1pq_u32:
  3108. case NEON::BI__builtin_neon_vsha1mq_u32:
  3109. case ARM::BI_MoveToCoprocessor:
  3110. case ARM::BI_MoveToCoprocessor2:
  3111. return false;
  3112. }
  3113. return true;
  3114. }
  3115. Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
  3116. const CallExpr *E) {
  3117. if (auto Hint = GetValueForARMHint(BuiltinID))
  3118. return Hint;
  3119. if (BuiltinID == ARM::BI__emit) {
  3120. bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
  3121. llvm::FunctionType *FTy =
  3122. llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
  3123. APSInt Value;
  3124. if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
  3125. llvm_unreachable("Sema will ensure that the parameter is constant");
  3126. uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
  3127. llvm::InlineAsm *Emit =
  3128. IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
  3129. /*SideEffects=*/true)
  3130. : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
  3131. /*SideEffects=*/true);
  3132. return Builder.CreateCall(Emit);
  3133. }
  3134. if (BuiltinID == ARM::BI__builtin_arm_dbg) {
  3135. Value *Option = EmitScalarExpr(E->getArg(0));
  3136. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
  3137. }
  3138. if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
  3139. Value *Address = EmitScalarExpr(E->getArg(0));
  3140. Value *RW = EmitScalarExpr(E->getArg(1));
  3141. Value *IsData = EmitScalarExpr(E->getArg(2));
  3142. // Locality is not supported on ARM target
  3143. Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
  3144. Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
  3145. return Builder.CreateCall(F, {Address, RW, Locality, IsData});
  3146. }
  3147. if (BuiltinID == ARM::BI__builtin_arm_rbit) {
  3148. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_rbit),
  3149. EmitScalarExpr(E->getArg(0)),
  3150. "rbit");
  3151. }
  3152. if (BuiltinID == ARM::BI__clear_cache) {
  3153. assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
  3154. const FunctionDecl *FD = E->getDirectCallee();
  3155. SmallVector<Value*, 2> Ops;
  3156. for (unsigned i = 0; i < 2; i++)
  3157. Ops.push_back(EmitScalarExpr(E->getArg(i)));
  3158. llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
  3159. llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
  3160. StringRef Name = FD->getName();
  3161. return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  3162. }
  3163. if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
  3164. ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
  3165. BuiltinID == ARM::BI__builtin_arm_ldaex) &&
  3166. getContext().getTypeSize(E->getType()) == 64) ||
  3167. BuiltinID == ARM::BI__ldrexd) {
  3168. Function *F;
  3169. switch (BuiltinID) {
  3170. default: llvm_unreachable("unexpected builtin");
  3171. case ARM::BI__builtin_arm_ldaex:
  3172. F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
  3173. break;
  3174. case ARM::BI__builtin_arm_ldrexd:
  3175. case ARM::BI__builtin_arm_ldrex:
  3176. case ARM::BI__ldrexd:
  3177. F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
  3178. break;
  3179. }
  3180. Value *LdPtr = EmitScalarExpr(E->getArg(0));
  3181. Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
  3182. "ldrexd");
  3183. Value *Val0 = Builder.CreateExtractValue(Val, 1);
  3184. Value *Val1 = Builder.CreateExtractValue(Val, 0);
  3185. Val0 = Builder.CreateZExt(Val0, Int64Ty);
  3186. Val1 = Builder.CreateZExt(Val1, Int64Ty);
  3187. Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
  3188. Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
  3189. Val = Builder.CreateOr(Val, Val1);
  3190. return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  3191. }
  3192. if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
  3193. BuiltinID == ARM::BI__builtin_arm_ldaex) {
  3194. Value *LoadAddr = EmitScalarExpr(E->getArg(0));
  3195. QualType Ty = E->getType();
  3196. llvm::Type *RealResTy = ConvertType(Ty);
  3197. llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
  3198. getContext().getTypeSize(Ty));
  3199. LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
  3200. Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
  3201. ? Intrinsic::arm_ldaex
  3202. : Intrinsic::arm_ldrex,
  3203. LoadAddr->getType());
  3204. Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
  3205. if (RealResTy->isPointerTy())
  3206. return Builder.CreateIntToPtr(Val, RealResTy);
  3207. else {
  3208. Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
  3209. return Builder.CreateBitCast(Val, RealResTy);
  3210. }
  3211. }
  3212. if (BuiltinID == ARM::BI__builtin_arm_strexd ||
  3213. ((BuiltinID == ARM::BI__builtin_arm_stlex ||
  3214. BuiltinID == ARM::BI__builtin_arm_strex) &&
  3215. getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
  3216. Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
  3217. ? Intrinsic::arm_stlexd
  3218. : Intrinsic::arm_strexd);
  3219. llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
  3220. Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
  3221. Value *Val = EmitScalarExpr(E->getArg(0));
  3222. Builder.CreateStore(Val, Tmp);
  3223. Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
  3224. Val = Builder.CreateLoad(LdPtr);
  3225. Value *Arg0 = Builder.CreateExtractValue(Val, 0);
  3226. Value *Arg1 = Builder.CreateExtractValue(Val, 1);
  3227. Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
  3228. return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
  3229. }
  3230. if (BuiltinID == ARM::BI__builtin_arm_strex ||
  3231. BuiltinID == ARM::BI__builtin_arm_stlex) {
  3232. Value *StoreVal = EmitScalarExpr(E->getArg(0));
  3233. Value *StoreAddr = EmitScalarExpr(E->getArg(1));
  3234. QualType Ty = E->getArg(0)->getType();
  3235. llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
  3236. getContext().getTypeSize(Ty));
  3237. StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
  3238. if (StoreVal->getType()->isPointerTy())
  3239. StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
  3240. else {
  3241. StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
  3242. StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
  3243. }
  3244. Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
  3245. ? Intrinsic::arm_stlex
  3246. : Intrinsic::arm_strex,
  3247. StoreAddr->getType());
  3248. return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
  3249. }
  3250. if (BuiltinID == ARM::BI__builtin_arm_clrex) {
  3251. Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
  3252. return Builder.CreateCall(F);
  3253. }
  3254. // CRC32
  3255. Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  3256. switch (BuiltinID) {
  3257. case ARM::BI__builtin_arm_crc32b:
  3258. CRCIntrinsicID = Intrinsic::arm_crc32b; break;
  3259. case ARM::BI__builtin_arm_crc32cb:
  3260. CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
  3261. case ARM::BI__builtin_arm_crc32h:
  3262. CRCIntrinsicID = Intrinsic::arm_crc32h; break;
  3263. case ARM::BI__builtin_arm_crc32ch:
  3264. CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
  3265. case ARM::BI__builtin_arm_crc32w:
  3266. case ARM::BI__builtin_arm_crc32d:
  3267. CRCIntrinsicID = Intrinsic::arm_crc32w; break;
  3268. case ARM::BI__builtin_arm_crc32cw:
  3269. case ARM::BI__builtin_arm_crc32cd:
  3270. CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
  3271. }
  3272. if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
  3273. Value *Arg0 = EmitScalarExpr(E->getArg(0));
  3274. Value *Arg1 = EmitScalarExpr(E->getArg(1));
  3275. // crc32{c,}d intrinsics are implemnted as two calls to crc32{c,}w
  3276. // intrinsics, hence we need different codegen for these cases.
  3277. if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
  3278. BuiltinID == ARM::BI__builtin_arm_crc32cd) {
  3279. Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
  3280. Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
  3281. Value *Arg1b = Builder.CreateLShr(Arg1, C1);
  3282. Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
  3283. Function *F = CGM.getIntrinsic(CRCIntrinsicID);
  3284. Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
  3285. return Builder.CreateCall(F, {Res, Arg1b});
  3286. } else {
  3287. Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
  3288. Function *F = CGM.getIntrinsic(CRCIntrinsicID);
  3289. return Builder.CreateCall(F, {Arg0, Arg1});
  3290. }
  3291. }
  3292. if (BuiltinID == ARM::BI__builtin_arm_rsr ||
  3293. BuiltinID == ARM::BI__builtin_arm_rsr64 ||
  3294. BuiltinID == ARM::BI__builtin_arm_rsrp ||
  3295. BuiltinID == ARM::BI__builtin_arm_wsr ||
  3296. BuiltinID == ARM::BI__builtin_arm_wsr64 ||
  3297. BuiltinID == ARM::BI__builtin_arm_wsrp) {
  3298. bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
  3299. BuiltinID == ARM::BI__builtin_arm_rsr64 ||
  3300. BuiltinID == ARM::BI__builtin_arm_rsrp;
  3301. bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
  3302. BuiltinID == ARM::BI__builtin_arm_wsrp;
  3303. bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
  3304. BuiltinID == ARM::BI__builtin_arm_wsr64;
  3305. llvm::Type *ValueType;
  3306. llvm::Type *RegisterType;
  3307. if (IsPointerBuiltin) {
  3308. ValueType = VoidPtrTy;
  3309. RegisterType = Int32Ty;
  3310. } else if (Is64Bit) {
  3311. ValueType = RegisterType = Int64Ty;
  3312. } else {
  3313. ValueType = RegisterType = Int32Ty;
  3314. }
  3315. return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
  3316. }
  3317. // Find out if any arguments are required to be integer constant
  3318. // expressions.
  3319. unsigned ICEArguments = 0;
  3320. ASTContext::GetBuiltinTypeError Error;
  3321. getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  3322. assert(Error == ASTContext::GE_None && "Should not codegen an error");
  3323. SmallVector<Value*, 4> Ops;
  3324. llvm::Value *Align = nullptr;
  3325. bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
  3326. unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
  3327. for (unsigned i = 0, e = NumArgs; i != e; i++) {
  3328. if (i == 0) {
  3329. switch (BuiltinID) {
  3330. case NEON::BI__builtin_neon_vld1_v:
  3331. case NEON::BI__builtin_neon_vld1q_v:
  3332. case NEON::BI__builtin_neon_vld1q_lane_v:
  3333. case NEON::BI__builtin_neon_vld1_lane_v:
  3334. case NEON::BI__builtin_neon_vld1_dup_v:
  3335. case NEON::BI__builtin_neon_vld1q_dup_v:
  3336. case NEON::BI__builtin_neon_vst1_v:
  3337. case NEON::BI__builtin_neon_vst1q_v:
  3338. case NEON::BI__builtin_neon_vst1q_lane_v:
  3339. case NEON::BI__builtin_neon_vst1_lane_v:
  3340. case NEON::BI__builtin_neon_vst2_v:
  3341. case NEON::BI__builtin_neon_vst2q_v:
  3342. case NEON::BI__builtin_neon_vst2_lane_v:
  3343. case NEON::BI__builtin_neon_vst2q_lane_v:
  3344. case NEON::BI__builtin_neon_vst3_v:
  3345. case NEON::BI__builtin_neon_vst3q_v:
  3346. case NEON::BI__builtin_neon_vst3_lane_v:
  3347. case NEON::BI__builtin_neon_vst3q_lane_v:
  3348. case NEON::BI__builtin_neon_vst4_v:
  3349. case NEON::BI__builtin_neon_vst4q_v:
  3350. case NEON::BI__builtin_neon_vst4_lane_v:
  3351. case NEON::BI__builtin_neon_vst4q_lane_v:
  3352. // Get the alignment for the argument in addition to the value;
  3353. // we'll use it later.
  3354. std::pair<llvm::Value*, unsigned> Src =
  3355. EmitPointerWithAlignment(E->getArg(0));
  3356. Ops.push_back(Src.first);
  3357. Align = Builder.getInt32(Src.second);
  3358. continue;
  3359. }
  3360. }
  3361. if (i == 1) {
  3362. switch (BuiltinID) {
  3363. case NEON::BI__builtin_neon_vld2_v:
  3364. case NEON::BI__builtin_neon_vld2q_v:
  3365. case NEON::BI__builtin_neon_vld3_v:
  3366. case NEON::BI__builtin_neon_vld3q_v:
  3367. case NEON::BI__builtin_neon_vld4_v:
  3368. case NEON::BI__builtin_neon_vld4q_v:
  3369. case NEON::BI__builtin_neon_vld2_lane_v:
  3370. case NEON::BI__builtin_neon_vld2q_lane_v:
  3371. case NEON::BI__builtin_neon_vld3_lane_v:
  3372. case NEON::BI__builtin_neon_vld3q_lane_v:
  3373. case NEON::BI__builtin_neon_vld4_lane_v:
  3374. case NEON::BI__builtin_neon_vld4q_lane_v:
  3375. case NEON::BI__builtin_neon_vld2_dup_v:
  3376. case NEON::BI__builtin_neon_vld3_dup_v:
  3377. case NEON::BI__builtin_neon_vld4_dup_v:
  3378. // Get the alignment for the argument in addition to the value;
  3379. // we'll use it later.
  3380. std::pair<llvm::Value*, unsigned> Src =
  3381. EmitPointerWithAlignment(E->getArg(1));
  3382. Ops.push_back(Src.first);
  3383. Align = Builder.getInt32(Src.second);
  3384. continue;
  3385. }
  3386. }
  3387. if ((ICEArguments & (1 << i)) == 0) {
  3388. Ops.push_back(EmitScalarExpr(E->getArg(i)));
  3389. } else {
  3390. // If this is required to be a constant, constant fold it so that we know
  3391. // that the generated intrinsic gets a ConstantInt.
  3392. llvm::APSInt Result;
  3393. bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
  3394. assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
  3395. Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  3396. }
  3397. }
  3398. switch (BuiltinID) {
  3399. default: break;
  3400. case NEON::BI__builtin_neon_vget_lane_i8:
  3401. case NEON::BI__builtin_neon_vget_lane_i16:
  3402. case NEON::BI__builtin_neon_vget_lane_i32:
  3403. case NEON::BI__builtin_neon_vget_lane_i64:
  3404. case NEON::BI__builtin_neon_vget_lane_f32:
  3405. case NEON::BI__builtin_neon_vgetq_lane_i8:
  3406. case NEON::BI__builtin_neon_vgetq_lane_i16:
  3407. case NEON::BI__builtin_neon_vgetq_lane_i32:
  3408. case NEON::BI__builtin_neon_vgetq_lane_i64:
  3409. case NEON::BI__builtin_neon_vgetq_lane_f32:
  3410. return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
  3411. case NEON::BI__builtin_neon_vset_lane_i8:
  3412. case NEON::BI__builtin_neon_vset_lane_i16:
  3413. case NEON::BI__builtin_neon_vset_lane_i32:
  3414. case NEON::BI__builtin_neon_vset_lane_i64:
  3415. case NEON::BI__builtin_neon_vset_lane_f32:
  3416. case NEON::BI__builtin_neon_vsetq_lane_i8:
  3417. case NEON::BI__builtin_neon_vsetq_lane_i16:
  3418. case NEON::BI__builtin_neon_vsetq_lane_i32:
  3419. case NEON::BI__builtin_neon_vsetq_lane_i64:
  3420. case NEON::BI__builtin_neon_vsetq_lane_f32:
  3421. return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  3422. case NEON::BI__builtin_neon_vsha1h_u32:
  3423. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
  3424. "vsha1h");
  3425. case NEON::BI__builtin_neon_vsha1cq_u32:
  3426. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
  3427. "vsha1h");
  3428. case NEON::BI__builtin_neon_vsha1pq_u32:
  3429. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
  3430. "vsha1h");
  3431. case NEON::BI__builtin_neon_vsha1mq_u32:
  3432. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
  3433. "vsha1h");
  3434. // The ARM _MoveToCoprocessor builtins put the input register value as
  3435. // the first argument, but the LLVM intrinsic expects it as the third one.
  3436. case ARM::BI_MoveToCoprocessor:
  3437. case ARM::BI_MoveToCoprocessor2: {
  3438. Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
  3439. Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
  3440. return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
  3441. Ops[3], Ops[4], Ops[5]});
  3442. }
  3443. }
  3444. // Get the last argument, which specifies the vector type.
  3445. assert(HasExtraArg);
  3446. llvm::APSInt Result;
  3447. const Expr *Arg = E->getArg(E->getNumArgs()-1);
  3448. if (!Arg->isIntegerConstantExpr(Result, getContext()))
  3449. return nullptr;
  3450. if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
  3451. BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
  3452. // Determine the overloaded type of this builtin.
  3453. llvm::Type *Ty;
  3454. if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
  3455. Ty = FloatTy;
  3456. else
  3457. Ty = DoubleTy;
  3458. // Determine whether this is an unsigned conversion or not.
  3459. bool usgn = Result.getZExtValue() == 1;
  3460. unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
  3461. // Call the appropriate intrinsic.
  3462. Function *F = CGM.getIntrinsic(Int, Ty);
  3463. return Builder.CreateCall(F, Ops, "vcvtr");
  3464. }
  3465. // Determine the type of this overloaded NEON intrinsic.
  3466. NeonTypeFlags Type(Result.getZExtValue());
  3467. bool usgn = Type.isUnsigned();
  3468. bool rightShift = false;
  3469. llvm::VectorType *VTy = GetNeonType(this, Type);
  3470. llvm::Type *Ty = VTy;
  3471. if (!Ty)
  3472. return nullptr;
  3473. // Many NEON builtins have identical semantics and uses in ARM and
  3474. // AArch64. Emit these in a single function.
  3475. auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
  3476. const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
  3477. IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
  3478. if (Builtin)
  3479. return EmitCommonNeonBuiltinExpr(
  3480. Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
  3481. Builtin->NameHint, Builtin->TypeModifier, E, Ops, Align);
  3482. unsigned Int;
  3483. switch (BuiltinID) {
  3484. default: return nullptr;
  3485. case NEON::BI__builtin_neon_vld1q_lane_v:
  3486. // Handle 64-bit integer elements as a special case. Use shuffles of
  3487. // one-element vectors to avoid poor code for i64 in the backend.
  3488. if (VTy->getElementType()->isIntegerTy(64)) {
  3489. // Extract the other lane.
  3490. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  3491. int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
  3492. Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
  3493. Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
  3494. // Load the value as a one-element vector.
  3495. Ty = llvm::VectorType::get(VTy->getElementType(), 1);
  3496. Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
  3497. Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
  3498. // Combine them.
  3499. SmallVector<Constant*, 2> Indices;
  3500. Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
  3501. Indices.push_back(ConstantInt::get(Int32Ty, Lane));
  3502. SV = llvm::ConstantVector::get(Indices);
  3503. return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
  3504. }
  3505. // fall through
  3506. case NEON::BI__builtin_neon_vld1_lane_v: {
  3507. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  3508. Ty = llvm::PointerType::getUnqual(VTy->getElementType());
  3509. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  3510. LoadInst *Ld = Builder.CreateLoad(Ops[0]);
  3511. Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
  3512. return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
  3513. }
  3514. case NEON::BI__builtin_neon_vld2_dup_v:
  3515. case NEON::BI__builtin_neon_vld3_dup_v:
  3516. case NEON::BI__builtin_neon_vld4_dup_v: {
  3517. // Handle 64-bit elements as a special-case. There is no "dup" needed.
  3518. if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
  3519. switch (BuiltinID) {
  3520. case NEON::BI__builtin_neon_vld2_dup_v:
  3521. Int = Intrinsic::arm_neon_vld2;
  3522. break;
  3523. case NEON::BI__builtin_neon_vld3_dup_v:
  3524. Int = Intrinsic::arm_neon_vld3;
  3525. break;
  3526. case NEON::BI__builtin_neon_vld4_dup_v:
  3527. Int = Intrinsic::arm_neon_vld4;
  3528. break;
  3529. default: llvm_unreachable("unknown vld_dup intrinsic?");
  3530. }
  3531. Function *F = CGM.getIntrinsic(Int, Ty);
  3532. Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, "vld_dup");
  3533. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  3534. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  3535. return Builder.CreateStore(Ops[1], Ops[0]);
  3536. }
  3537. switch (BuiltinID) {
  3538. case NEON::BI__builtin_neon_vld2_dup_v:
  3539. Int = Intrinsic::arm_neon_vld2lane;
  3540. break;
  3541. case NEON::BI__builtin_neon_vld3_dup_v:
  3542. Int = Intrinsic::arm_neon_vld3lane;
  3543. break;
  3544. case NEON::BI__builtin_neon_vld4_dup_v:
  3545. Int = Intrinsic::arm_neon_vld4lane;
  3546. break;
  3547. default: llvm_unreachable("unknown vld_dup intrinsic?");
  3548. }
  3549. Function *F = CGM.getIntrinsic(Int, Ty);
  3550. llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
  3551. SmallVector<Value*, 6> Args;
  3552. Args.push_back(Ops[1]);
  3553. Args.append(STy->getNumElements(), UndefValue::get(Ty));
  3554. llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
  3555. Args.push_back(CI);
  3556. Args.push_back(Align);
  3557. Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
  3558. // splat lane 0 to all elts in each vector of the result.
  3559. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  3560. Value *Val = Builder.CreateExtractValue(Ops[1], i);
  3561. Value *Elt = Builder.CreateBitCast(Val, Ty);
  3562. Elt = EmitNeonSplat(Elt, CI);
  3563. Elt = Builder.CreateBitCast(Elt, Val->getType());
  3564. Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
  3565. }
  3566. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  3567. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  3568. return Builder.CreateStore(Ops[1], Ops[0]);
  3569. }
  3570. case NEON::BI__builtin_neon_vqrshrn_n_v:
  3571. Int =
  3572. usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
  3573. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
  3574. 1, true);
  3575. case NEON::BI__builtin_neon_vqrshrun_n_v:
  3576. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
  3577. Ops, "vqrshrun_n", 1, true);
  3578. case NEON::BI__builtin_neon_vqshrn_n_v:
  3579. Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
  3580. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
  3581. 1, true);
  3582. case NEON::BI__builtin_neon_vqshrun_n_v:
  3583. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
  3584. Ops, "vqshrun_n", 1, true);
  3585. case NEON::BI__builtin_neon_vrecpe_v:
  3586. case NEON::BI__builtin_neon_vrecpeq_v:
  3587. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
  3588. Ops, "vrecpe");
  3589. case NEON::BI__builtin_neon_vrshrn_n_v:
  3590. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
  3591. Ops, "vrshrn_n", 1, true);
  3592. case NEON::BI__builtin_neon_vrsra_n_v:
  3593. case NEON::BI__builtin_neon_vrsraq_n_v:
  3594. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  3595. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  3596. Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
  3597. Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
  3598. Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
  3599. return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  3600. case NEON::BI__builtin_neon_vsri_n_v:
  3601. case NEON::BI__builtin_neon_vsriq_n_v:
  3602. rightShift = true;
  3603. case NEON::BI__builtin_neon_vsli_n_v:
  3604. case NEON::BI__builtin_neon_vsliq_n_v:
  3605. Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
  3606. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
  3607. Ops, "vsli_n");
  3608. case NEON::BI__builtin_neon_vsra_n_v:
  3609. case NEON::BI__builtin_neon_vsraq_n_v:
  3610. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  3611. Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
  3612. return Builder.CreateAdd(Ops[0], Ops[1]);
  3613. case NEON::BI__builtin_neon_vst1q_lane_v:
  3614. // Handle 64-bit integer elements as a special case. Use a shuffle to get
  3615. // a one-element vector and avoid poor code for i64 in the backend.
  3616. if (VTy->getElementType()->isIntegerTy(64)) {
  3617. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  3618. Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
  3619. Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
  3620. Ops[2] = Align;
  3621. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
  3622. Ops[1]->getType()), Ops);
  3623. }
  3624. // fall through
  3625. case NEON::BI__builtin_neon_vst1_lane_v: {
  3626. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  3627. Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
  3628. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  3629. StoreInst *St = Builder.CreateStore(Ops[1],
  3630. Builder.CreateBitCast(Ops[0], Ty));
  3631. St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
  3632. return St;
  3633. }
  3634. case NEON::BI__builtin_neon_vtbl1_v:
  3635. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
  3636. Ops, "vtbl1");
  3637. case NEON::BI__builtin_neon_vtbl2_v:
  3638. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
  3639. Ops, "vtbl2");
  3640. case NEON::BI__builtin_neon_vtbl3_v:
  3641. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
  3642. Ops, "vtbl3");
  3643. case NEON::BI__builtin_neon_vtbl4_v:
  3644. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
  3645. Ops, "vtbl4");
  3646. case NEON::BI__builtin_neon_vtbx1_v:
  3647. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
  3648. Ops, "vtbx1");
  3649. case NEON::BI__builtin_neon_vtbx2_v:
  3650. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
  3651. Ops, "vtbx2");
  3652. case NEON::BI__builtin_neon_vtbx3_v:
  3653. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
  3654. Ops, "vtbx3");
  3655. case NEON::BI__builtin_neon_vtbx4_v:
  3656. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
  3657. Ops, "vtbx4");
  3658. }
  3659. }
  3660. static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
  3661. const CallExpr *E,
  3662. SmallVectorImpl<Value *> &Ops) {
  3663. unsigned int Int = 0;
  3664. const char *s = nullptr;
  3665. switch (BuiltinID) {
  3666. default:
  3667. return nullptr;
  3668. case NEON::BI__builtin_neon_vtbl1_v:
  3669. case NEON::BI__builtin_neon_vqtbl1_v:
  3670. case NEON::BI__builtin_neon_vqtbl1q_v:
  3671. case NEON::BI__builtin_neon_vtbl2_v:
  3672. case NEON::BI__builtin_neon_vqtbl2_v:
  3673. case NEON::BI__builtin_neon_vqtbl2q_v:
  3674. case NEON::BI__builtin_neon_vtbl3_v:
  3675. case NEON::BI__builtin_neon_vqtbl3_v:
  3676. case NEON::BI__builtin_neon_vqtbl3q_v:
  3677. case NEON::BI__builtin_neon_vtbl4_v:
  3678. case NEON::BI__builtin_neon_vqtbl4_v:
  3679. case NEON::BI__builtin_neon_vqtbl4q_v:
  3680. break;
  3681. case NEON::BI__builtin_neon_vtbx1_v:
  3682. case NEON::BI__builtin_neon_vqtbx1_v:
  3683. case NEON::BI__builtin_neon_vqtbx1q_v:
  3684. case NEON::BI__builtin_neon_vtbx2_v:
  3685. case NEON::BI__builtin_neon_vqtbx2_v:
  3686. case NEON::BI__builtin_neon_vqtbx2q_v:
  3687. case NEON::BI__builtin_neon_vtbx3_v:
  3688. case NEON::BI__builtin_neon_vqtbx3_v:
  3689. case NEON::BI__builtin_neon_vqtbx3q_v:
  3690. case NEON::BI__builtin_neon_vtbx4_v:
  3691. case NEON::BI__builtin_neon_vqtbx4_v:
  3692. case NEON::BI__builtin_neon_vqtbx4q_v:
  3693. break;
  3694. }
  3695. assert(E->getNumArgs() >= 3);
  3696. // Get the last argument, which specifies the vector type.
  3697. llvm::APSInt Result;
  3698. const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  3699. if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
  3700. return nullptr;
  3701. // Determine the type of this overloaded NEON intrinsic.
  3702. NeonTypeFlags Type(Result.getZExtValue());
  3703. llvm::VectorType *VTy = GetNeonType(&CGF, Type);
  3704. llvm::Type *Ty = VTy;
  3705. if (!Ty)
  3706. return nullptr;
  3707. unsigned nElts = VTy->getNumElements();
  3708. CodeGen::CGBuilderTy &Builder = CGF.Builder;
  3709. // AArch64 scalar builtins are not overloaded, they do not have an extra
  3710. // argument that specifies the vector type, need to handle each case.
  3711. SmallVector<Value *, 2> TblOps;
  3712. switch (BuiltinID) {
  3713. case NEON::BI__builtin_neon_vtbl1_v: {
  3714. TblOps.push_back(Ops[0]);
  3715. return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty,
  3716. Intrinsic::aarch64_neon_tbl1, "vtbl1");
  3717. }
  3718. case NEON::BI__builtin_neon_vtbl2_v: {
  3719. TblOps.push_back(Ops[0]);
  3720. TblOps.push_back(Ops[1]);
  3721. return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
  3722. Intrinsic::aarch64_neon_tbl1, "vtbl1");
  3723. }
  3724. case NEON::BI__builtin_neon_vtbl3_v: {
  3725. TblOps.push_back(Ops[0]);
  3726. TblOps.push_back(Ops[1]);
  3727. TblOps.push_back(Ops[2]);
  3728. return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty,
  3729. Intrinsic::aarch64_neon_tbl2, "vtbl2");
  3730. }
  3731. case NEON::BI__builtin_neon_vtbl4_v: {
  3732. TblOps.push_back(Ops[0]);
  3733. TblOps.push_back(Ops[1]);
  3734. TblOps.push_back(Ops[2]);
  3735. TblOps.push_back(Ops[3]);
  3736. return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
  3737. Intrinsic::aarch64_neon_tbl2, "vtbl2");
  3738. }
  3739. case NEON::BI__builtin_neon_vtbx1_v: {
  3740. TblOps.push_back(Ops[1]);
  3741. Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
  3742. Intrinsic::aarch64_neon_tbl1, "vtbl1");
  3743. llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
  3744. Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
  3745. Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
  3746. CmpRes = Builder.CreateSExt(CmpRes, Ty);
  3747. Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
  3748. Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
  3749. return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
  3750. }
  3751. case NEON::BI__builtin_neon_vtbx2_v: {
  3752. TblOps.push_back(Ops[1]);
  3753. TblOps.push_back(Ops[2]);
  3754. return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
  3755. Intrinsic::aarch64_neon_tbx1, "vtbx1");
  3756. }
  3757. case NEON::BI__builtin_neon_vtbx3_v: {
  3758. TblOps.push_back(Ops[1]);
  3759. TblOps.push_back(Ops[2]);
  3760. TblOps.push_back(Ops[3]);
  3761. Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
  3762. Intrinsic::aarch64_neon_tbl2, "vtbl2");
  3763. llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
  3764. Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
  3765. Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
  3766. TwentyFourV);
  3767. CmpRes = Builder.CreateSExt(CmpRes, Ty);
  3768. Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
  3769. Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
  3770. return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
  3771. }
  3772. case NEON::BI__builtin_neon_vtbx4_v: {
  3773. TblOps.push_back(Ops[1]);
  3774. TblOps.push_back(Ops[2]);
  3775. TblOps.push_back(Ops[3]);
  3776. TblOps.push_back(Ops[4]);
  3777. return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
  3778. Intrinsic::aarch64_neon_tbx2, "vtbx2");
  3779. }
  3780. case NEON::BI__builtin_neon_vqtbl1_v:
  3781. case NEON::BI__builtin_neon_vqtbl1q_v:
  3782. Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
  3783. case NEON::BI__builtin_neon_vqtbl2_v:
  3784. case NEON::BI__builtin_neon_vqtbl2q_v: {
  3785. Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
  3786. case NEON::BI__builtin_neon_vqtbl3_v:
  3787. case NEON::BI__builtin_neon_vqtbl3q_v:
  3788. Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
  3789. case NEON::BI__builtin_neon_vqtbl4_v:
  3790. case NEON::BI__builtin_neon_vqtbl4q_v:
  3791. Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
  3792. case NEON::BI__builtin_neon_vqtbx1_v:
  3793. case NEON::BI__builtin_neon_vqtbx1q_v:
  3794. Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
  3795. case NEON::BI__builtin_neon_vqtbx2_v:
  3796. case NEON::BI__builtin_neon_vqtbx2q_v:
  3797. Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
  3798. case NEON::BI__builtin_neon_vqtbx3_v:
  3799. case NEON::BI__builtin_neon_vqtbx3q_v:
  3800. Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
  3801. case NEON::BI__builtin_neon_vqtbx4_v:
  3802. case NEON::BI__builtin_neon_vqtbx4q_v:
  3803. Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
  3804. }
  3805. }
  3806. if (!Int)
  3807. return nullptr;
  3808. Function *F = CGF.CGM.getIntrinsic(Int, Ty);
  3809. return CGF.EmitNeonCall(F, Ops, s);
  3810. }
  3811. Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
  3812. llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
  3813. Op = Builder.CreateBitCast(Op, Int16Ty);
  3814. Value *V = UndefValue::get(VTy);
  3815. llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
  3816. Op = Builder.CreateInsertElement(V, Op, CI);
  3817. return Op;
  3818. }
  3819. Value *CodeGenFunction::vectorWrapScalar8(Value *Op) {
  3820. llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
  3821. Op = Builder.CreateBitCast(Op, Int8Ty);
  3822. Value *V = UndefValue::get(VTy);
  3823. llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
  3824. Op = Builder.CreateInsertElement(V, Op, CI);
  3825. return Op;
  3826. }
  3827. Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
  3828. const CallExpr *E) {
  3829. unsigned HintID = static_cast<unsigned>(-1);
  3830. switch (BuiltinID) {
  3831. default: break;
  3832. case AArch64::BI__builtin_arm_nop:
  3833. HintID = 0;
  3834. break;
  3835. case AArch64::BI__builtin_arm_yield:
  3836. HintID = 1;
  3837. break;
  3838. case AArch64::BI__builtin_arm_wfe:
  3839. HintID = 2;
  3840. break;
  3841. case AArch64::BI__builtin_arm_wfi:
  3842. HintID = 3;
  3843. break;
  3844. case AArch64::BI__builtin_arm_sev:
  3845. HintID = 4;
  3846. break;
  3847. case AArch64::BI__builtin_arm_sevl:
  3848. HintID = 5;
  3849. break;
  3850. }
  3851. if (HintID != static_cast<unsigned>(-1)) {
  3852. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
  3853. return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
  3854. }
  3855. if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
  3856. Value *Address = EmitScalarExpr(E->getArg(0));
  3857. Value *RW = EmitScalarExpr(E->getArg(1));
  3858. Value *CacheLevel = EmitScalarExpr(E->getArg(2));
  3859. Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
  3860. Value *IsData = EmitScalarExpr(E->getArg(4));
  3861. Value *Locality = nullptr;
  3862. if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
  3863. // Temporal fetch, needs to convert cache level to locality.
  3864. Locality = llvm::ConstantInt::get(Int32Ty,
  3865. -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
  3866. } else {
  3867. // Streaming fetch.
  3868. Locality = llvm::ConstantInt::get(Int32Ty, 0);
  3869. }
  3870. // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
  3871. // PLDL3STRM or PLDL2STRM.
  3872. Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
  3873. return Builder.CreateCall(F, {Address, RW, Locality, IsData});
  3874. }
  3875. if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
  3876. assert((getContext().getTypeSize(E->getType()) == 32) &&
  3877. "rbit of unusual size!");
  3878. llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
  3879. return Builder.CreateCall(
  3880. CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
  3881. }
  3882. if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
  3883. assert((getContext().getTypeSize(E->getType()) == 64) &&
  3884. "rbit of unusual size!");
  3885. llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
  3886. return Builder.CreateCall(
  3887. CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
  3888. }
  3889. if (BuiltinID == AArch64::BI__clear_cache) {
  3890. assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
  3891. const FunctionDecl *FD = E->getDirectCallee();
  3892. SmallVector<Value*, 2> Ops;
  3893. for (unsigned i = 0; i < 2; i++)
  3894. Ops.push_back(EmitScalarExpr(E->getArg(i)));
  3895. llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
  3896. llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
  3897. StringRef Name = FD->getName();
  3898. return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  3899. }
  3900. if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
  3901. BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
  3902. getContext().getTypeSize(E->getType()) == 128) {
  3903. Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
  3904. ? Intrinsic::aarch64_ldaxp
  3905. : Intrinsic::aarch64_ldxp);
  3906. Value *LdPtr = EmitScalarExpr(E->getArg(0));
  3907. Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
  3908. "ldxp");
  3909. Value *Val0 = Builder.CreateExtractValue(Val, 1);
  3910. Value *Val1 = Builder.CreateExtractValue(Val, 0);
  3911. llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
  3912. Val0 = Builder.CreateZExt(Val0, Int128Ty);
  3913. Val1 = Builder.CreateZExt(Val1, Int128Ty);
  3914. Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
  3915. Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
  3916. Val = Builder.CreateOr(Val, Val1);
  3917. return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  3918. } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
  3919. BuiltinID == AArch64::BI__builtin_arm_ldaex) {
  3920. Value *LoadAddr = EmitScalarExpr(E->getArg(0));
  3921. QualType Ty = E->getType();
  3922. llvm::Type *RealResTy = ConvertType(Ty);
  3923. llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
  3924. getContext().getTypeSize(Ty));
  3925. LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
  3926. Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
  3927. ? Intrinsic::aarch64_ldaxr
  3928. : Intrinsic::aarch64_ldxr,
  3929. LoadAddr->getType());
  3930. Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
  3931. if (RealResTy->isPointerTy())
  3932. return Builder.CreateIntToPtr(Val, RealResTy);
  3933. Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
  3934. return Builder.CreateBitCast(Val, RealResTy);
  3935. }
  3936. if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
  3937. BuiltinID == AArch64::BI__builtin_arm_stlex) &&
  3938. getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
  3939. Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
  3940. ? Intrinsic::aarch64_stlxp
  3941. : Intrinsic::aarch64_stxp);
  3942. llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr);
  3943. Value *One = llvm::ConstantInt::get(Int32Ty, 1);
  3944. Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
  3945. One);
  3946. Value *Val = EmitScalarExpr(E->getArg(0));
  3947. Builder.CreateStore(Val, Tmp);
  3948. Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
  3949. Val = Builder.CreateLoad(LdPtr);
  3950. Value *Arg0 = Builder.CreateExtractValue(Val, 0);
  3951. Value *Arg1 = Builder.CreateExtractValue(Val, 1);
  3952. Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
  3953. Int8PtrTy);
  3954. return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
  3955. }
  3956. if (BuiltinID == AArch64::BI__builtin_arm_strex ||
  3957. BuiltinID == AArch64::BI__builtin_arm_stlex) {
  3958. Value *StoreVal = EmitScalarExpr(E->getArg(0));
  3959. Value *StoreAddr = EmitScalarExpr(E->getArg(1));
  3960. QualType Ty = E->getArg(0)->getType();
  3961. llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
  3962. getContext().getTypeSize(Ty));
  3963. StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
  3964. if (StoreVal->getType()->isPointerTy())
  3965. StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
  3966. else {
  3967. StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
  3968. StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
  3969. }
  3970. Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
  3971. ? Intrinsic::aarch64_stlxr
  3972. : Intrinsic::aarch64_stxr,
  3973. StoreAddr->getType());
  3974. return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
  3975. }
  3976. if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
  3977. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
  3978. return Builder.CreateCall(F);
  3979. }
  3980. // CRC32
  3981. Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  3982. switch (BuiltinID) {
  3983. case AArch64::BI__builtin_arm_crc32b:
  3984. CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
  3985. case AArch64::BI__builtin_arm_crc32cb:
  3986. CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
  3987. case AArch64::BI__builtin_arm_crc32h:
  3988. CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
  3989. case AArch64::BI__builtin_arm_crc32ch:
  3990. CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
  3991. case AArch64::BI__builtin_arm_crc32w:
  3992. CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
  3993. case AArch64::BI__builtin_arm_crc32cw:
  3994. CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
  3995. case AArch64::BI__builtin_arm_crc32d:
  3996. CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
  3997. case AArch64::BI__builtin_arm_crc32cd:
  3998. CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
  3999. }
  4000. if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
  4001. Value *Arg0 = EmitScalarExpr(E->getArg(0));
  4002. Value *Arg1 = EmitScalarExpr(E->getArg(1));
  4003. Function *F = CGM.getIntrinsic(CRCIntrinsicID);
  4004. llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
  4005. Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
  4006. return Builder.CreateCall(F, {Arg0, Arg1});
  4007. }
  4008. if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
  4009. BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
  4010. BuiltinID == AArch64::BI__builtin_arm_rsrp ||
  4011. BuiltinID == AArch64::BI__builtin_arm_wsr ||
  4012. BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
  4013. BuiltinID == AArch64::BI__builtin_arm_wsrp) {
  4014. bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
  4015. BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
  4016. BuiltinID == AArch64::BI__builtin_arm_rsrp;
  4017. bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
  4018. BuiltinID == AArch64::BI__builtin_arm_wsrp;
  4019. bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
  4020. BuiltinID != AArch64::BI__builtin_arm_wsr;
  4021. llvm::Type *ValueType;
  4022. llvm::Type *RegisterType = Int64Ty;
  4023. if (IsPointerBuiltin) {
  4024. ValueType = VoidPtrTy;
  4025. } else if (Is64Bit) {
  4026. ValueType = Int64Ty;
  4027. } else {
  4028. ValueType = Int32Ty;
  4029. }
  4030. return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
  4031. }
  4032. // Find out if any arguments are required to be integer constant
  4033. // expressions.
  4034. unsigned ICEArguments = 0;
  4035. ASTContext::GetBuiltinTypeError Error;
  4036. getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  4037. assert(Error == ASTContext::GE_None && "Should not codegen an error");
  4038. llvm::SmallVector<Value*, 4> Ops;
  4039. for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
  4040. if ((ICEArguments & (1 << i)) == 0) {
  4041. Ops.push_back(EmitScalarExpr(E->getArg(i)));
  4042. } else {
  4043. // If this is required to be a constant, constant fold it so that we know
  4044. // that the generated intrinsic gets a ConstantInt.
  4045. llvm::APSInt Result;
  4046. bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
  4047. assert(IsConst && "Constant arg isn't actually constant?");
  4048. (void)IsConst;
  4049. Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  4050. }
  4051. }
  4052. auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
  4053. const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
  4054. SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
  4055. if (Builtin) {
  4056. Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
  4057. Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
  4058. assert(Result && "SISD intrinsic should have been handled");
  4059. return Result;
  4060. }
  4061. llvm::APSInt Result;
  4062. const Expr *Arg = E->getArg(E->getNumArgs()-1);
  4063. NeonTypeFlags Type(0);
  4064. if (Arg->isIntegerConstantExpr(Result, getContext()))
  4065. // Determine the type of this overloaded NEON intrinsic.
  4066. Type = NeonTypeFlags(Result.getZExtValue());
  4067. bool usgn = Type.isUnsigned();
  4068. bool quad = Type.isQuad();
  4069. // Handle non-overloaded intrinsics first.
  4070. switch (BuiltinID) {
  4071. default: break;
  4072. case NEON::BI__builtin_neon_vldrq_p128: {
  4073. llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
  4074. Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
  4075. return Builder.CreateLoad(Ptr);
  4076. }
  4077. case NEON::BI__builtin_neon_vstrq_p128: {
  4078. llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
  4079. Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
  4080. return Builder.CreateStore(EmitScalarExpr(E->getArg(1)), Ptr);
  4081. }
  4082. case NEON::BI__builtin_neon_vcvts_u32_f32:
  4083. case NEON::BI__builtin_neon_vcvtd_u64_f64:
  4084. usgn = true;
  4085. // FALL THROUGH
  4086. case NEON::BI__builtin_neon_vcvts_s32_f32:
  4087. case NEON::BI__builtin_neon_vcvtd_s64_f64: {
  4088. Ops.push_back(EmitScalarExpr(E->getArg(0)));
  4089. bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
  4090. llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
  4091. llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
  4092. Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
  4093. if (usgn)
  4094. return Builder.CreateFPToUI(Ops[0], InTy);
  4095. return Builder.CreateFPToSI(Ops[0], InTy);
  4096. }
  4097. case NEON::BI__builtin_neon_vcvts_f32_u32:
  4098. case NEON::BI__builtin_neon_vcvtd_f64_u64:
  4099. usgn = true;
  4100. // FALL THROUGH
  4101. case NEON::BI__builtin_neon_vcvts_f32_s32:
  4102. case NEON::BI__builtin_neon_vcvtd_f64_s64: {
  4103. Ops.push_back(EmitScalarExpr(E->getArg(0)));
  4104. bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
  4105. llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
  4106. llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
  4107. Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
  4108. if (usgn)
  4109. return Builder.CreateUIToFP(Ops[0], FTy);
  4110. return Builder.CreateSIToFP(Ops[0], FTy);
  4111. }
  4112. case NEON::BI__builtin_neon_vpaddd_s64: {
  4113. llvm::Type *Ty =
  4114. llvm::VectorType::get(llvm::Type::getInt64Ty(getLLVMContext()), 2);
  4115. Value *Vec = EmitScalarExpr(E->getArg(0));
  4116. // The vector is v2f64, so make sure it's bitcast to that.
  4117. Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
  4118. llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
  4119. llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
  4120. Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
  4121. Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
  4122. // Pairwise addition of a v2f64 into a scalar f64.
  4123. return Builder.CreateAdd(Op0, Op1, "vpaddd");
  4124. }
  4125. case NEON::BI__builtin_neon_vpaddd_f64: {
  4126. llvm::Type *Ty =
  4127. llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2);
  4128. Value *Vec = EmitScalarExpr(E->getArg(0));
  4129. // The vector is v2f64, so make sure it's bitcast to that.
  4130. Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
  4131. llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
  4132. llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
  4133. Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
  4134. Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
  4135. // Pairwise addition of a v2f64 into a scalar f64.
  4136. return Builder.CreateFAdd(Op0, Op1, "vpaddd");
  4137. }
  4138. case NEON::BI__builtin_neon_vpadds_f32: {
  4139. llvm::Type *Ty =
  4140. llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2);
  4141. Value *Vec = EmitScalarExpr(E->getArg(0));
  4142. // The vector is v2f32, so make sure it's bitcast to that.
  4143. Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
  4144. llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
  4145. llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
  4146. Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
  4147. Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
  4148. // Pairwise addition of a v2f32 into a scalar f32.
  4149. return Builder.CreateFAdd(Op0, Op1, "vpaddd");
  4150. }
  4151. case NEON::BI__builtin_neon_vceqzd_s64:
  4152. case NEON::BI__builtin_neon_vceqzd_f64:
  4153. case NEON::BI__builtin_neon_vceqzs_f32:
  4154. Ops.push_back(EmitScalarExpr(E->getArg(0)));
  4155. return EmitAArch64CompareBuiltinExpr(
  4156. Ops[0], ConvertType(E->getCallReturnType(getContext())),
  4157. ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
  4158. case NEON::BI__builtin_neon_vcgezd_s64:
  4159. case NEON::BI__builtin_neon_vcgezd_f64:
  4160. case NEON::BI__builtin_neon_vcgezs_f32:
  4161. Ops.push_back(EmitScalarExpr(E->getArg(0)));
  4162. return EmitAArch64CompareBuiltinExpr(
  4163. Ops[0], ConvertType(E->getCallReturnType(getContext())),
  4164. ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
  4165. case NEON::BI__builtin_neon_vclezd_s64:
  4166. case NEON::BI__builtin_neon_vclezd_f64:
  4167. case NEON::BI__builtin_neon_vclezs_f32:
  4168. Ops.push_back(EmitScalarExpr(E->getArg(0)));
  4169. return EmitAArch64CompareBuiltinExpr(
  4170. Ops[0], ConvertType(E->getCallReturnType(getContext())),
  4171. ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
  4172. case NEON::BI__builtin_neon_vcgtzd_s64:
  4173. case NEON::BI__builtin_neon_vcgtzd_f64:
  4174. case NEON::BI__builtin_neon_vcgtzs_f32:
  4175. Ops.push_back(EmitScalarExpr(E->getArg(0)));
  4176. return EmitAArch64CompareBuiltinExpr(
  4177. Ops[0], ConvertType(E->getCallReturnType(getContext())),
  4178. ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
  4179. case NEON::BI__builtin_neon_vcltzd_s64:
  4180. case NEON::BI__builtin_neon_vcltzd_f64:
  4181. case NEON::BI__builtin_neon_vcltzs_f32:
  4182. Ops.push_back(EmitScalarExpr(E->getArg(0)));
  4183. return EmitAArch64CompareBuiltinExpr(
  4184. Ops[0], ConvertType(E->getCallReturnType(getContext())),
  4185. ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
  4186. case NEON::BI__builtin_neon_vceqzd_u64: {
  4187. llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
  4188. Ops.push_back(EmitScalarExpr(E->getArg(0)));
  4189. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  4190. Ops[0] = Builder.CreateICmp(llvm::ICmpInst::ICMP_EQ, Ops[0],
  4191. llvm::Constant::getNullValue(Ty));
  4192. return Builder.CreateSExt(Ops[0], Ty, "vceqzd");
  4193. }
  4194. case NEON::BI__builtin_neon_vceqd_f64:
  4195. case NEON::BI__builtin_neon_vcled_f64:
  4196. case NEON::BI__builtin_neon_vcltd_f64:
  4197. case NEON::BI__builtin_neon_vcged_f64:
  4198. case NEON::BI__builtin_neon_vcgtd_f64: {
  4199. llvm::CmpInst::Predicate P;
  4200. switch (BuiltinID) {
  4201. default: llvm_unreachable("missing builtin ID in switch!");
  4202. case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
  4203. case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
  4204. case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
  4205. case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
  4206. case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
  4207. }
  4208. Ops.push_back(EmitScalarExpr(E->getArg(1)));
  4209. Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
  4210. Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
  4211. Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
  4212. return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
  4213. }
  4214. case NEON::BI__builtin_neon_vceqs_f32:
  4215. case NEON::BI__builtin_neon_vcles_f32:
  4216. case NEON::BI__builtin_neon_vclts_f32:
  4217. case NEON::BI__builtin_neon_vcges_f32:
  4218. case NEON::BI__builtin_neon_vcgts_f32: {
  4219. llvm::CmpInst::Predicate P;
  4220. switch (BuiltinID) {
  4221. default: llvm_unreachable("missing builtin ID in switch!");
  4222. case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
  4223. case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
  4224. case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
  4225. case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
  4226. case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
  4227. }
  4228. Ops.push_back(EmitScalarExpr(E->getArg(1)));
  4229. Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
  4230. Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
  4231. Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
  4232. return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
  4233. }
  4234. case NEON::BI__builtin_neon_vceqd_s64:
  4235. case NEON::BI__builtin_neon_vceqd_u64:
  4236. case NEON::BI__builtin_neon_vcgtd_s64:
  4237. case NEON::BI__builtin_neon_vcgtd_u64:
  4238. case NEON::BI__builtin_neon_vcltd_s64:
  4239. case NEON::BI__builtin_neon_vcltd_u64:
  4240. case NEON::BI__builtin_neon_vcged_u64:
  4241. case NEON::BI__builtin_neon_vcged_s64:
  4242. case NEON::BI__builtin_neon_vcled_u64:
  4243. case NEON::BI__builtin_neon_vcled_s64: {
  4244. llvm::CmpInst::Predicate P;
  4245. switch (BuiltinID) {
  4246. default: llvm_unreachable("missing builtin ID in switch!");
  4247. case NEON::BI__builtin_neon_vceqd_s64:
  4248. case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
  4249. case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
  4250. case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
  4251. case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
  4252. case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
  4253. case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
  4254. case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
  4255. case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
  4256. case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
  4257. }
  4258. Ops.push_back(EmitScalarExpr(E->getArg(1)));
  4259. Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
  4260. Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
  4261. Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
  4262. return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
  4263. }
  4264. case NEON::BI__builtin_neon_vtstd_s64:
  4265. case NEON::BI__builtin_neon_vtstd_u64: {
  4266. llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
  4267. Ops.push_back(EmitScalarExpr(E->getArg(1)));
  4268. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  4269. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  4270. Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
  4271. Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
  4272. llvm::Constant::getNullValue(Ty));
  4273. return Builder.CreateSExt(Ops[0], Ty, "vtstd");
  4274. }
  4275. case NEON::BI__builtin_neon_vset_lane_i8:
  4276. case NEON::BI__builtin_neon_vset_lane_i16:
  4277. case NEON::BI__builtin_neon_vset_lane_i32:
  4278. case NEON::BI__builtin_neon_vset_lane_i64:
  4279. case NEON::BI__builtin_neon_vset_lane_f32:
  4280. case NEON::BI__builtin_neon_vsetq_lane_i8:
  4281. case NEON::BI__builtin_neon_vsetq_lane_i16:
  4282. case NEON::BI__builtin_neon_vsetq_lane_i32:
  4283. case NEON::BI__builtin_neon_vsetq_lane_i64:
  4284. case NEON::BI__builtin_neon_vsetq_lane_f32:
  4285. Ops.push_back(EmitScalarExpr(E->getArg(2)));
  4286. return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  4287. case NEON::BI__builtin_neon_vset_lane_f64:
  4288. // The vector type needs a cast for the v1f64 variant.
  4289. Ops[1] = Builder.CreateBitCast(Ops[1],
  4290. llvm::VectorType::get(DoubleTy, 1));
  4291. Ops.push_back(EmitScalarExpr(E->getArg(2)));
  4292. return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  4293. case NEON::BI__builtin_neon_vsetq_lane_f64:
  4294. // The vector type needs a cast for the v2f64 variant.
  4295. Ops[1] = Builder.CreateBitCast(Ops[1],
  4296. llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
  4297. Ops.push_back(EmitScalarExpr(E->getArg(2)));
  4298. return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  4299. case NEON::BI__builtin_neon_vget_lane_i8:
  4300. case NEON::BI__builtin_neon_vdupb_lane_i8:
  4301. Ops[0] = Builder.CreateBitCast(Ops[0],
  4302. llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8));
  4303. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4304. "vget_lane");
  4305. case NEON::BI__builtin_neon_vgetq_lane_i8:
  4306. case NEON::BI__builtin_neon_vdupb_laneq_i8:
  4307. Ops[0] = Builder.CreateBitCast(Ops[0],
  4308. llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16));
  4309. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4310. "vgetq_lane");
  4311. case NEON::BI__builtin_neon_vget_lane_i16:
  4312. case NEON::BI__builtin_neon_vduph_lane_i16:
  4313. Ops[0] = Builder.CreateBitCast(Ops[0],
  4314. llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4));
  4315. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4316. "vget_lane");
  4317. case NEON::BI__builtin_neon_vgetq_lane_i16:
  4318. case NEON::BI__builtin_neon_vduph_laneq_i16:
  4319. Ops[0] = Builder.CreateBitCast(Ops[0],
  4320. llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8));
  4321. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4322. "vgetq_lane");
  4323. case NEON::BI__builtin_neon_vget_lane_i32:
  4324. case NEON::BI__builtin_neon_vdups_lane_i32:
  4325. Ops[0] = Builder.CreateBitCast(
  4326. Ops[0],
  4327. llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 2));
  4328. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4329. "vget_lane");
  4330. case NEON::BI__builtin_neon_vdups_lane_f32:
  4331. Ops[0] = Builder.CreateBitCast(Ops[0],
  4332. llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
  4333. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4334. "vdups_lane");
  4335. case NEON::BI__builtin_neon_vgetq_lane_i32:
  4336. case NEON::BI__builtin_neon_vdups_laneq_i32:
  4337. Ops[0] = Builder.CreateBitCast(Ops[0],
  4338. llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 4));
  4339. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4340. "vgetq_lane");
  4341. case NEON::BI__builtin_neon_vget_lane_i64:
  4342. case NEON::BI__builtin_neon_vdupd_lane_i64:
  4343. Ops[0] = Builder.CreateBitCast(Ops[0],
  4344. llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 1));
  4345. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4346. "vget_lane");
  4347. case NEON::BI__builtin_neon_vdupd_lane_f64:
  4348. Ops[0] = Builder.CreateBitCast(Ops[0],
  4349. llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
  4350. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4351. "vdupd_lane");
  4352. case NEON::BI__builtin_neon_vgetq_lane_i64:
  4353. case NEON::BI__builtin_neon_vdupd_laneq_i64:
  4354. Ops[0] = Builder.CreateBitCast(Ops[0],
  4355. llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 2));
  4356. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4357. "vgetq_lane");
  4358. case NEON::BI__builtin_neon_vget_lane_f32:
  4359. Ops[0] = Builder.CreateBitCast(Ops[0],
  4360. llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
  4361. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4362. "vget_lane");
  4363. case NEON::BI__builtin_neon_vget_lane_f64:
  4364. Ops[0] = Builder.CreateBitCast(Ops[0],
  4365. llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
  4366. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4367. "vget_lane");
  4368. case NEON::BI__builtin_neon_vgetq_lane_f32:
  4369. case NEON::BI__builtin_neon_vdups_laneq_f32:
  4370. Ops[0] = Builder.CreateBitCast(Ops[0],
  4371. llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 4));
  4372. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4373. "vgetq_lane");
  4374. case NEON::BI__builtin_neon_vgetq_lane_f64:
  4375. case NEON::BI__builtin_neon_vdupd_laneq_f64:
  4376. Ops[0] = Builder.CreateBitCast(Ops[0],
  4377. llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
  4378. return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
  4379. "vgetq_lane");
  4380. case NEON::BI__builtin_neon_vaddd_s64:
  4381. case NEON::BI__builtin_neon_vaddd_u64:
  4382. return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
  4383. case NEON::BI__builtin_neon_vsubd_s64:
  4384. case NEON::BI__builtin_neon_vsubd_u64:
  4385. return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
  4386. case NEON::BI__builtin_neon_vqdmlalh_s16:
  4387. case NEON::BI__builtin_neon_vqdmlslh_s16: {
  4388. SmallVector<Value *, 2> ProductOps;
  4389. ProductOps.push_back(vectorWrapScalar16(Ops[1]));
  4390. ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
  4391. llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
  4392. Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
  4393. ProductOps, "vqdmlXl");
  4394. Constant *CI = ConstantInt::get(SizeTy, 0);
  4395. Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
  4396. unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
  4397. ? Intrinsic::aarch64_neon_sqadd
  4398. : Intrinsic::aarch64_neon_sqsub;
  4399. return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
  4400. }
  4401. case NEON::BI__builtin_neon_vqshlud_n_s64: {
  4402. Ops.push_back(EmitScalarExpr(E->getArg(1)));
  4403. Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
  4404. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
  4405. Ops, "vqshlu_n");
  4406. }
  4407. case NEON::BI__builtin_neon_vqshld_n_u64:
  4408. case NEON::BI__builtin_neon_vqshld_n_s64: {
  4409. unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
  4410. ? Intrinsic::aarch64_neon_uqshl
  4411. : Intrinsic::aarch64_neon_sqshl;
  4412. Ops.push_back(EmitScalarExpr(E->getArg(1)));
  4413. Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
  4414. return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
  4415. }
  4416. case NEON::BI__builtin_neon_vrshrd_n_u64:
  4417. case NEON::BI__builtin_neon_vrshrd_n_s64: {
  4418. unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
  4419. ? Intrinsic::aarch64_neon_urshl
  4420. : Intrinsic::aarch64_neon_srshl;
  4421. Ops.push_back(EmitScalarExpr(E->getArg(1)));
  4422. int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
  4423. Ops[1] = ConstantInt::get(Int64Ty, -SV);
  4424. return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
  4425. }
  4426. case NEON::BI__builtin_neon_vrsrad_n_u64:
  4427. case NEON::BI__builtin_neon_vrsrad_n_s64: {
  4428. unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
  4429. ? Intrinsic::aarch64_neon_urshl
  4430. : Intrinsic::aarch64_neon_srshl;
  4431. Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
  4432. Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
  4433. Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
  4434. {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
  4435. return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
  4436. }
  4437. case NEON::BI__builtin_neon_vshld_n_s64:
  4438. case NEON::BI__builtin_neon_vshld_n_u64: {
  4439. llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
  4440. return Builder.CreateShl(
  4441. Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
  4442. }
  4443. case NEON::BI__builtin_neon_vshrd_n_s64: {
  4444. llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
  4445. return Builder.CreateAShr(
  4446. Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
  4447. Amt->getZExtValue())),
  4448. "shrd_n");
  4449. }
  4450. case NEON::BI__builtin_neon_vshrd_n_u64: {
  4451. llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
  4452. uint64_t ShiftAmt = Amt->getZExtValue();
  4453. // Right-shifting an unsigned value by its size yields 0.
  4454. if (ShiftAmt == 64)
  4455. return ConstantInt::get(Int64Ty, 0);
  4456. return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
  4457. "shrd_n");
  4458. }
  4459. case NEON::BI__builtin_neon_vsrad_n_s64: {
  4460. llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
  4461. Ops[1] = Builder.CreateAShr(
  4462. Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
  4463. Amt->getZExtValue())),
  4464. "shrd_n");
  4465. return Builder.CreateAdd(Ops[0], Ops[1]);
  4466. }
  4467. case NEON::BI__builtin_neon_vsrad_n_u64: {
  4468. llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
  4469. uint64_t ShiftAmt = Amt->getZExtValue();
  4470. // Right-shifting an unsigned value by its size yields 0.
  4471. // As Op + 0 = Op, return Ops[0] directly.
  4472. if (ShiftAmt == 64)
  4473. return Ops[0];
  4474. Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
  4475. "shrd_n");
  4476. return Builder.CreateAdd(Ops[0], Ops[1]);
  4477. }
  4478. case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
  4479. case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
  4480. case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
  4481. case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
  4482. Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
  4483. "lane");
  4484. SmallVector<Value *, 2> ProductOps;
  4485. ProductOps.push_back(vectorWrapScalar16(Ops[1]));
  4486. ProductOps.push_back(vectorWrapScalar16(Ops[2]));
  4487. llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
  4488. Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
  4489. ProductOps, "vqdmlXl");
  4490. Constant *CI = ConstantInt::get(SizeTy, 0);
  4491. Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
  4492. Ops.pop_back();
  4493. unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
  4494. BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
  4495. ? Intrinsic::aarch64_neon_sqadd
  4496. : Intrinsic::aarch64_neon_sqsub;
  4497. return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
  4498. }
  4499. case NEON::BI__builtin_neon_vqdmlals_s32:
  4500. case NEON::BI__builtin_neon_vqdmlsls_s32: {
  4501. SmallVector<Value *, 2> ProductOps;
  4502. ProductOps.push_back(Ops[1]);
  4503. ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
  4504. Ops[1] =
  4505. EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
  4506. ProductOps, "vqdmlXl");
  4507. unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
  4508. ? Intrinsic::aarch64_neon_sqadd
  4509. : Intrinsic::aarch64_neon_sqsub;
  4510. return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
  4511. }
  4512. case NEON::BI__builtin_neon_vqdmlals_lane_s32:
  4513. case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
  4514. case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
  4515. case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
  4516. Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
  4517. "lane");
  4518. SmallVector<Value *, 2> ProductOps;
  4519. ProductOps.push_back(Ops[1]);
  4520. ProductOps.push_back(Ops[2]);
  4521. Ops[1] =
  4522. EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
  4523. ProductOps, "vqdmlXl");
  4524. Ops.pop_back();
  4525. unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
  4526. BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
  4527. ? Intrinsic::aarch64_neon_sqadd
  4528. : Intrinsic::aarch64_neon_sqsub;
  4529. return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
  4530. }
  4531. }
  4532. llvm::VectorType *VTy = GetNeonType(this, Type);
  4533. llvm::Type *Ty = VTy;
  4534. if (!Ty)
  4535. return nullptr;
  4536. // Not all intrinsics handled by the common case work for AArch64 yet, so only
  4537. // defer to common code if it's been added to our special map.
  4538. Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
  4539. AArch64SIMDIntrinsicsProvenSorted);
  4540. if (Builtin)
  4541. return EmitCommonNeonBuiltinExpr(
  4542. Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
  4543. Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr);
  4544. if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
  4545. return V;
  4546. unsigned Int;
  4547. switch (BuiltinID) {
  4548. default: return nullptr;
  4549. case NEON::BI__builtin_neon_vbsl_v:
  4550. case NEON::BI__builtin_neon_vbslq_v: {
  4551. llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
  4552. Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
  4553. Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
  4554. Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
  4555. Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
  4556. Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
  4557. Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
  4558. return Builder.CreateBitCast(Ops[0], Ty);
  4559. }
  4560. case NEON::BI__builtin_neon_vfma_lane_v:
  4561. case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
  4562. // The ARM builtins (and instructions) have the addend as the first
  4563. // operand, but the 'fma' intrinsics have it last. Swap it around here.
  4564. Value *Addend = Ops[0];
  4565. Value *Multiplicand = Ops[1];
  4566. Value *LaneSource = Ops[2];
  4567. Ops[0] = Multiplicand;
  4568. Ops[1] = LaneSource;
  4569. Ops[2] = Addend;
  4570. // Now adjust things to handle the lane access.
  4571. llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
  4572. llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
  4573. VTy;
  4574. llvm::Constant *cst = cast<Constant>(Ops[3]);
  4575. Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
  4576. Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
  4577. Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
  4578. Ops.pop_back();
  4579. Int = Intrinsic::fma;
  4580. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
  4581. }
  case NEON::BI__builtin_neon_vfma_laneq_v: {
    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    // v1f64 fma should be mapped to Neon scalar f64 fma
    if (VTy && VTy->getElementType() == DoubleTy) {
      // Scalar path: bitcast accumulator and multiplicand to double, extract
      // the selected lane from the f64x2 third operand, and emit a scalar
      // llvm.fma. Note llvm.fma takes the addend LAST, while the builtin has
      // it first (Ops[0]); hence the {mul, lane, acc} operand order below.
      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
      llvm::Type *VTy = GetNeonType(this,
        NeonTypeFlags(NeonTypeFlags::Float64, false, true));
      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
      Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
      Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
      return Builder.CreateBitCast(Result, Ty);
    }
    // Vector path: the lane source of a "_laneq" builtin is a 128-bit vector,
    // i.e. twice as many elements as the result type; splat the chosen lane
    // across the result width with a shuffle, then emit vector llvm.fma.
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
                                            VTy->getNumElements() * 2);
    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
    Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
                                               cast<ConstantInt>(Ops[3]));
    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
    return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfmaq_laneq_v: {
    // Quad result with quad lane source: same element count on both sides, so
    // EmitNeonSplat suffices; no widened shuffle type needed.
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
    return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfmas_lane_f32:
  case NEON::BI__builtin_neon_vfmas_laneq_f32:
  case NEON::BI__builtin_neon_vfmad_lane_f64:
  case NEON::BI__builtin_neon_vfmad_laneq_f64: {
    // Scalar fma-by-lane: extract the lane (arg 3 is the lane index) and emit
    // scalar llvm.fma, again moving the accumulator Ops[0] to the last slot.
    Ops.push_back(EmitScalarExpr(E->getArg(3)));
    llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
    return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfms_v:
  case NEON::BI__builtin_neon_vfmsq_v: { // Only used for FP types
    // FIXME: probably remove when we no longer support aarch64_simd.h
    // (arm_neon.h delegates to vfma).
    // The ARM builtins (and instructions) have the addend as the first
    // operand, but the 'fma' intrinsics have it last. Swap it around here.
    Value *Subtrahend = Ops[0];
    Value *Multiplicand = Ops[2];
    Ops[0] = Multiplicand;
    Ops[2] = Subtrahend;
    // fms(acc, a, b) == fma(-a, b, acc): negate one multiplicand.
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateFNeg(Ops[1]);
    Int = Intrinsic::fma;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmls");
  }
  case NEON::BI__builtin_neon_vmull_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    // Widening multiply: pick signed/unsigned form, but polynomial element
    // types always use pmull regardless of signedness.
    Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
    if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vmax_v:
  case NEON::BI__builtin_neon_vmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    // Elementwise max: signed/unsigned integer form, FP form for float vectors.
    Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case NEON::BI__builtin_neon_vmin_v:
  case NEON::BI__builtin_neon_vminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
  case NEON::BI__builtin_neon_vabd_v:
  case NEON::BI__builtin_neon_vabdq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    // Absolute difference, same signed/unsigned/FP selection as above.
    Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    // Pairwise add-and-accumulate-long. There is no direct padal intrinsic
    // here: lower as a pairwise widening add (uaddlp/saddlp) of the source
    // (Ops[1]), then an ordinary vector add of the accumulator (Ops[0]).
    unsigned ArgElts = VTy->getNumElements();
    llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
    unsigned BitWidth = EltTy->getBitWidth();
    // The source vector has twice the elements at half the element width.
    llvm::Type *ArgTy = llvm::VectorType::get(
        llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
    llvm::Type* Tys[2] = { VTy, ArgTy };
    Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
    SmallVector<llvm::Value*, 1> TmpOps;
    TmpOps.push_back(Ops[1]);
    Function *F = CGM.getIntrinsic(Int, Tys);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
    llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
    return Builder.CreateAdd(tmp, addend);
  }
  case NEON::BI__builtin_neon_vpmin_v:
  case NEON::BI__builtin_neon_vpminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    // Pairwise min/max follow the same selection pattern as vmin/vmax above.
    Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case NEON::BI__builtin_neon_vpmax_v:
  case NEON::BI__builtin_neon_vpmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case NEON::BI__builtin_neon_vminnm_v:
  case NEON::BI__builtin_neon_vminnmq_v:
    // IEEE-754 minNum/maxNum variants (NaN-propagation differs from fmin/fmax).
    Int = Intrinsic::aarch64_neon_fminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
  case NEON::BI__builtin_neon_vmaxnm_v:
  case NEON::BI__builtin_neon_vmaxnmq_v:
    Int = Intrinsic::aarch64_neon_fmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
  case NEON::BI__builtin_neon_vrecpss_f32: {
    // Scalar Newton-Raphson reciprocal step; second argument has not been
    // emitted yet for these scalar builtins, so emit it here.
    llvm::Type *f32Type = llvm::Type::getFloatTy(getLLVMContext());
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f32Type),
                        Ops, "vrecps");
  }
  case NEON::BI__builtin_neon_vrecpsd_f64: {
    llvm::Type *f64Type = llvm::Type::getDoubleTy(getLLVMContext());
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type),
                        Ops, "vrecps");
  }
  case NEON::BI__builtin_neon_vqshrun_n_v:
    // Narrowing right shifts by immediate. The "un" forms are signed-input /
    // unsigned-saturating-output and so have no usgn variant.
    Int = Intrinsic::aarch64_neon_sqshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
  case NEON::BI__builtin_neon_vqrshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqrshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
  case NEON::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
  case NEON::BI__builtin_neon_vrshrn_n_v:
    // Rounding (non-saturating) narrow: one intrinsic for both signednesses.
    Int = Intrinsic::aarch64_neon_rshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
  case NEON::BI__builtin_neon_vrnda_v:
  case NEON::BI__builtin_neon_vrndaq_v: {
    // vrnda: round to nearest, ties away from zero -> llvm.round.
    Int = Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
  }
  case NEON::BI__builtin_neon_vrndi_v:
  case NEON::BI__builtin_neon_vrndiq_v: {
    // vrndi: round using current mode, no inexact exception -> llvm.nearbyint.
    Int = Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
  }
  case NEON::BI__builtin_neon_vrndm_v:
  case NEON::BI__builtin_neon_vrndmq_v: {
    // vrndm: round toward minus infinity -> llvm.floor.
    Int = Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
  }
  case NEON::BI__builtin_neon_vrndn_v:
  case NEON::BI__builtin_neon_vrndnq_v: {
    // vrndn: round to nearest, ties to even; no generic LLVM intrinsic for
    // this here, so use the target-specific frintn.
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndp_v:
  case NEON::BI__builtin_neon_vrndpq_v: {
    // vrndp: round toward plus infinity -> llvm.ceil.
    Int = Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
  }
  case NEON::BI__builtin_neon_vrndx_v:
  case NEON::BI__builtin_neon_vrndxq_v: {
    // vrndx: round using current mode, raising inexact -> llvm.rint.
    Int = Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
  }
  case NEON::BI__builtin_neon_vrnd_v:
  case NEON::BI__builtin_neon_vrndq_v: {
    // vrnd: round toward zero -> llvm.trunc (hence the "vrndz" IR name).
    Int = Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
  }
  case NEON::BI__builtin_neon_vceqz_v:
  case NEON::BI__builtin_neon_vceqzq_v:
    // Compare-against-zero builtins. Both an FP and an integer predicate are
    // supplied; EmitAArch64CompareBuiltinExpr presumably selects the right one
    // from the element type -- confirm against its definition.
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
                                         ICmpInst::ICMP_EQ, "vceqz");
  case NEON::BI__builtin_neon_vcgez_v:
  case NEON::BI__builtin_neon_vcgezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
                                         ICmpInst::ICMP_SGE, "vcgez");
  case NEON::BI__builtin_neon_vclez_v:
  case NEON::BI__builtin_neon_vclezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
                                         ICmpInst::ICMP_SLE, "vclez");
  case NEON::BI__builtin_neon_vcgtz_v:
  case NEON::BI__builtin_neon_vcgtzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
                                         ICmpInst::ICMP_SGT, "vcgtz");
  case NEON::BI__builtin_neon_vcltz_v:
  case NEON::BI__builtin_neon_vcltzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
                                         ICmpInst::ICMP_SLT, "vcltz");
  case NEON::BI__builtin_neon_vcvt_f64_v:
  case NEON::BI__builtin_neon_vcvtq_f64_v:
    // Integer -> f64 conversion: reinterpret the source as the (integer)
    // builtin type, then emit a [us]itofp to the f64 vector type.
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f64_f32: {
    // f32x2 -> f64x2 widening conversion (fpext).
    assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
           "unexpected vcvt_f64_f32 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
    return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_f32_f64: {
    // f64x2 -> f32x2 narrowing conversion (fptrunc).
    assert(Type.getEltType() == NeonTypeFlags::Float32 &&
           "unexpected vcvt_f32_f64 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
    return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v: {
    // FP -> integer conversion (round toward zero via fpto[us]i). The FP
    // source type is inferred from the integer element width: 64-bit
    // elements came from f64, otherwise f32.
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
    if (usgn)
      return Builder.CreateFPToUI(Ops[0], Ty);
    return Builder.CreateFPToSI(Ops[0], Ty);
  }
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v: {
    // FP -> int, round to nearest with ties away from zero (fcvta[us]).
    // As with plain vcvt, the FP source type is derived from the integer
    // element width (64-bit elements <=> f64 source).
    Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
  }
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    // FP -> int, round toward minus infinity (fcvtm[us]).
    Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
  }
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v: {
    // FP -> int, round to nearest with ties to even (fcvtn[us]).
    Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
  }
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v: {
    // FP -> int, round toward plus infinity (fcvtp[us]).
    Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
  }
  case NEON::BI__builtin_neon_vmulx_v:
  case NEON::BI__builtin_neon_vmulxq_v: {
    // fmulx: multiply-extended (special 0 * inf handling done by the target
    // intrinsic itself).
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmul_lane_v:
  case NEON::BI__builtin_neon_vmul_laneq_v: {
    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
    // Quad distinguishes the lane-source width: _laneq reads the lane from a
    // 128-bit (f64x2) vector, _lane from a 64-bit one.
    bool Quad = false;
    if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
      Quad = true;
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    llvm::Type *VTy = GetNeonType(this,
      NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    // Extract the requested lane (Ops[2] is the lane index) and multiply as
    // scalar doubles, then cast back to the v1f64 result type.
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Result, Ty);
  }
  case NEON::BI__builtin_neon_vnegd_s64:
    // Scalar i64 negate.
    return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
  case NEON::BI__builtin_neon_vpmaxnm_v:
  case NEON::BI__builtin_neon_vpmaxnmq_v: {
    // Pairwise maxNum/minNum (IEEE NaN semantics), FP only.
    Int = Intrinsic::aarch64_neon_fmaxnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
  }
  case NEON::BI__builtin_neon_vpminnm_v:
  case NEON::BI__builtin_neon_vpminnmq_v: {
    Int = Intrinsic::aarch64_neon_fminnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
  }
  case NEON::BI__builtin_neon_vsqrt_v:
  case NEON::BI__builtin_neon_vsqrtq_v: {
    // Elementwise square root via the generic llvm.sqrt intrinsic.
    Int = Intrinsic::sqrt;
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vrbit_v:
  case NEON::BI__builtin_neon_vrbitq_v: {
    // Bit reversal within each byte element.
    Int = Intrinsic::aarch64_neon_rbit;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
  }
  case NEON::BI__builtin_neon_vaddv_u8:
    // FIXME: These are handled by the AArch64 scalar code.
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddv_s8: {
    // Across-vector add of <8 x i8>. The [su]addv intrinsics return i32, so
    // call with an i32 result and the concrete source vector type, then
    // truncate back down to the 8-bit element width.
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vaddv_u16:
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddv_s16: {
    // Same pattern for <4 x i16>, truncating to i16.
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddvq_u8:
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddvq_s8: {
    // 128-bit source: <16 x i8>.
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vaddvq_u16:
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddvq_s16: {
    // 128-bit source: <8 x i16>.
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxv_u8: {
    // Across-vector max reductions. Like vaddv above, the horizontal-reduce
    // intrinsics return i32; call with {i32, source-vector} types and
    // truncate the result back to the element width.
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxv_u16: {
    // umaxv over <4 x i16>.
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxvq_u8: {
    // umaxv over <16 x i8>.
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxvq_u16: {
    // umaxv over <8 x i16>.
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxv_s8: {
    // Signed variants use smaxv; otherwise identical to the unsigned cases.
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxv_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxvq_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxvq_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vminv_u8: {
    // Across-vector min reductions; identical shape to the vmaxv cases:
    // i32-returning reduce intrinsic, then truncate to the element width.
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vminv_u16: {
    // uminv over <4 x i16>.
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vminvq_u8: {
    // uminv over <16 x i8>.
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vminvq_u16: {
    // uminv over <8 x i16>.
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vminv_s8: {
    // Signed variants use sminv.
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vminv_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vminvq_s8: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vminvq_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmul_n_f64: {
    // v1f64 multiply-by-scalar: operate as plain scalar doubles. The second
    // argument has not been emitted yet for this builtin, so emit it here.
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
    return Builder.CreateFMul(Ops[0], RHS);
  }
  case NEON::BI__builtin_neon_vaddlv_u8: {
    // Across-vector widening add. The [su]addlv intrinsics return i32; the
    // i8-element variants are truncated to i16 (the builtin's 16-bit result),
    // while the i16-element variants below return the i32 directly.
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddlv_u16: {
    // uaddlv over <4 x i16>; i32 result used as-is.
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_u8: {
    // uaddlv over <16 x i8>, truncated to i16.
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddlvq_u16: {
    // uaddlv over <8 x i16>; i32 result used as-is.
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlv_s8: {
    // Signed variants use saddlv; same shapes as the unsigned cases.
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddlv_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0],
             llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddlvq_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v: {
    // Shift right and insert.
    Int = Intrinsic::aarch64_neon_vsri;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsri_n");
  }
  case NEON::BI__builtin_neon_vsli_n_v:
  case NEON::BI__builtin_neon_vsliq_n_v: {
    // Shift left and insert.
    Int = Intrinsic::aarch64_neon_vsli;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsli_n");
  }
  case NEON::BI__builtin_neon_vsra_n_v:
  case NEON::BI__builtin_neon_vsraq_n_v:
    // Shift right (by immediate) and accumulate: emit the shift, then add the
    // accumulator Ops[0].
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case NEON::BI__builtin_neon_vrsra_n_v:
  case NEON::BI__builtin_neon_vrsraq_n_v: {
    // Rounding shift-right accumulate. The rounding right shift is expressed
    // through the [su]rshl (shift-left) intrinsic; the trailing (1, true)
    // arguments to EmitNeonCall presumably negate the shift amount to turn it
    // into a right shift -- confirm against EmitNeonCall's signature.
    Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
    SmallVector<llvm::Value*,2> TmpOps;
    TmpOps.push_back(Ops[1]);
    TmpOps.push_back(Ops[2]);
    Function* F = CGM.getIntrinsic(Int, Ty);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return Builder.CreateAdd(Ops[0], tmp);
  }
  5280. // FIXME: Sharing loads & stores with 32-bit is complicated by the absence
  5281. // of an Align parameter here.
  5282. case NEON::BI__builtin_neon_vld1_x2_v:
  5283. case NEON::BI__builtin_neon_vld1q_x2_v:
  5284. case NEON::BI__builtin_neon_vld1_x3_v:
  5285. case NEON::BI__builtin_neon_vld1q_x3_v:
  5286. case NEON::BI__builtin_neon_vld1_x4_v:
  5287. case NEON::BI__builtin_neon_vld1q_x4_v: {
  5288. llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
  5289. Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
  5290. llvm::Type *Tys[2] = { VTy, PTy };
  5291. unsigned Int;
  5292. switch (BuiltinID) {
  5293. case NEON::BI__builtin_neon_vld1_x2_v:
  5294. case NEON::BI__builtin_neon_vld1q_x2_v:
  5295. Int = Intrinsic::aarch64_neon_ld1x2;
  5296. break;
  5297. case NEON::BI__builtin_neon_vld1_x3_v:
  5298. case NEON::BI__builtin_neon_vld1q_x3_v:
  5299. Int = Intrinsic::aarch64_neon_ld1x3;
  5300. break;
  5301. case NEON::BI__builtin_neon_vld1_x4_v:
  5302. case NEON::BI__builtin_neon_vld1q_x4_v:
  5303. Int = Intrinsic::aarch64_neon_ld1x4;
  5304. break;
  5305. }
  5306. Function *F = CGM.getIntrinsic(Int, Tys);
  5307. Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
  5308. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  5309. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  5310. return Builder.CreateStore(Ops[1], Ops[0]);
  5311. }
  5312. case NEON::BI__builtin_neon_vst1_x2_v:
  5313. case NEON::BI__builtin_neon_vst1q_x2_v:
  5314. case NEON::BI__builtin_neon_vst1_x3_v:
  5315. case NEON::BI__builtin_neon_vst1q_x3_v:
  5316. case NEON::BI__builtin_neon_vst1_x4_v:
  5317. case NEON::BI__builtin_neon_vst1q_x4_v: {
  5318. llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
  5319. llvm::Type *Tys[2] = { VTy, PTy };
  5320. unsigned Int;
  5321. switch (BuiltinID) {
  5322. case NEON::BI__builtin_neon_vst1_x2_v:
  5323. case NEON::BI__builtin_neon_vst1q_x2_v:
  5324. Int = Intrinsic::aarch64_neon_st1x2;
  5325. break;
  5326. case NEON::BI__builtin_neon_vst1_x3_v:
  5327. case NEON::BI__builtin_neon_vst1q_x3_v:
  5328. Int = Intrinsic::aarch64_neon_st1x3;
  5329. break;
  5330. case NEON::BI__builtin_neon_vst1_x4_v:
  5331. case NEON::BI__builtin_neon_vst1q_x4_v:
  5332. Int = Intrinsic::aarch64_neon_st1x4;
  5333. break;
  5334. }
  5335. SmallVector<Value *, 4> IntOps(Ops.begin()+1, Ops.end());
  5336. IntOps.push_back(Ops[0]);
  5337. return EmitNeonCall(CGM.getIntrinsic(Int, Tys), IntOps, "");
  5338. }
  5339. case NEON::BI__builtin_neon_vld1_v:
  5340. case NEON::BI__builtin_neon_vld1q_v:
  5341. Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
  5342. return Builder.CreateLoad(Ops[0]);
  5343. case NEON::BI__builtin_neon_vst1_v:
  5344. case NEON::BI__builtin_neon_vst1q_v:
  5345. Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
  5346. Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
  5347. return Builder.CreateStore(Ops[1], Ops[0]);
  5348. case NEON::BI__builtin_neon_vld1_lane_v:
  5349. case NEON::BI__builtin_neon_vld1q_lane_v:
  5350. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  5351. Ty = llvm::PointerType::getUnqual(VTy->getElementType());
  5352. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  5353. Ops[0] = Builder.CreateLoad(Ops[0]);
  5354. return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  5355. case NEON::BI__builtin_neon_vld1_dup_v:
  5356. case NEON::BI__builtin_neon_vld1q_dup_v: {
  5357. Value *V = UndefValue::get(Ty);
  5358. Ty = llvm::PointerType::getUnqual(VTy->getElementType());
  5359. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  5360. Ops[0] = Builder.CreateLoad(Ops[0]);
  5361. llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
  5362. Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
  5363. return EmitNeonSplat(Ops[0], CI);
  5364. }
  5365. case NEON::BI__builtin_neon_vst1_lane_v:
  5366. case NEON::BI__builtin_neon_vst1q_lane_v:
  5367. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  5368. Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
  5369. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  5370. return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
  5371. case NEON::BI__builtin_neon_vld2_v:
  5372. case NEON::BI__builtin_neon_vld2q_v: {
  5373. llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
  5374. Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
  5375. llvm::Type *Tys[2] = { VTy, PTy };
  5376. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
  5377. Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
  5378. Ops[0] = Builder.CreateBitCast(Ops[0],
  5379. llvm::PointerType::getUnqual(Ops[1]->getType()));
  5380. return Builder.CreateStore(Ops[1], Ops[0]);
  5381. }
  5382. case NEON::BI__builtin_neon_vld3_v:
  5383. case NEON::BI__builtin_neon_vld3q_v: {
  5384. llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
  5385. Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
  5386. llvm::Type *Tys[2] = { VTy, PTy };
  5387. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
  5388. Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
  5389. Ops[0] = Builder.CreateBitCast(Ops[0],
  5390. llvm::PointerType::getUnqual(Ops[1]->getType()));
  5391. return Builder.CreateStore(Ops[1], Ops[0]);
  5392. }
  5393. case NEON::BI__builtin_neon_vld4_v:
  5394. case NEON::BI__builtin_neon_vld4q_v: {
  5395. llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
  5396. Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
  5397. llvm::Type *Tys[2] = { VTy, PTy };
  5398. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
  5399. Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
  5400. Ops[0] = Builder.CreateBitCast(Ops[0],
  5401. llvm::PointerType::getUnqual(Ops[1]->getType()));
  5402. return Builder.CreateStore(Ops[1], Ops[0]);
  5403. }
  5404. case NEON::BI__builtin_neon_vld2_dup_v:
  5405. case NEON::BI__builtin_neon_vld2q_dup_v: {
  5406. llvm::Type *PTy =
  5407. llvm::PointerType::getUnqual(VTy->getElementType());
  5408. Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
  5409. llvm::Type *Tys[2] = { VTy, PTy };
  5410. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
  5411. Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
  5412. Ops[0] = Builder.CreateBitCast(Ops[0],
  5413. llvm::PointerType::getUnqual(Ops[1]->getType()));
  5414. return Builder.CreateStore(Ops[1], Ops[0]);
  5415. }
  5416. case NEON::BI__builtin_neon_vld3_dup_v:
  5417. case NEON::BI__builtin_neon_vld3q_dup_v: {
  5418. llvm::Type *PTy =
  5419. llvm::PointerType::getUnqual(VTy->getElementType());
  5420. Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
  5421. llvm::Type *Tys[2] = { VTy, PTy };
  5422. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
  5423. Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
  5424. Ops[0] = Builder.CreateBitCast(Ops[0],
  5425. llvm::PointerType::getUnqual(Ops[1]->getType()));
  5426. return Builder.CreateStore(Ops[1], Ops[0]);
  5427. }
  5428. case NEON::BI__builtin_neon_vld4_dup_v:
  5429. case NEON::BI__builtin_neon_vld4q_dup_v: {
  5430. llvm::Type *PTy =
  5431. llvm::PointerType::getUnqual(VTy->getElementType());
  5432. Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
  5433. llvm::Type *Tys[2] = { VTy, PTy };
  5434. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
  5435. Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
  5436. Ops[0] = Builder.CreateBitCast(Ops[0],
  5437. llvm::PointerType::getUnqual(Ops[1]->getType()));
  5438. return Builder.CreateStore(Ops[1], Ops[0]);
  5439. }
  5440. case NEON::BI__builtin_neon_vld2_lane_v:
  5441. case NEON::BI__builtin_neon_vld2q_lane_v: {
  5442. llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
  5443. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
  5444. Ops.push_back(Ops[1]);
  5445. Ops.erase(Ops.begin()+1);
  5446. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  5447. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  5448. Ops[3] = Builder.CreateZExt(Ops[3],
  5449. llvm::IntegerType::get(getLLVMContext(), 64));
  5450. Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
  5451. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  5452. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  5453. return Builder.CreateStore(Ops[1], Ops[0]);
  5454. }
  5455. case NEON::BI__builtin_neon_vld3_lane_v:
  5456. case NEON::BI__builtin_neon_vld3q_lane_v: {
  5457. llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
  5458. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
  5459. Ops.push_back(Ops[1]);
  5460. Ops.erase(Ops.begin()+1);
  5461. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  5462. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  5463. Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
  5464. Ops[4] = Builder.CreateZExt(Ops[4],
  5465. llvm::IntegerType::get(getLLVMContext(), 64));
  5466. Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
  5467. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  5468. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  5469. return Builder.CreateStore(Ops[1], Ops[0]);
  5470. }
  5471. case NEON::BI__builtin_neon_vld4_lane_v:
  5472. case NEON::BI__builtin_neon_vld4q_lane_v: {
  5473. llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
  5474. Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
  5475. Ops.push_back(Ops[1]);
  5476. Ops.erase(Ops.begin()+1);
  5477. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  5478. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  5479. Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
  5480. Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
  5481. Ops[5] = Builder.CreateZExt(Ops[5],
  5482. llvm::IntegerType::get(getLLVMContext(), 64));
  5483. Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
  5484. Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
  5485. Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  5486. return Builder.CreateStore(Ops[1], Ops[0]);
  5487. }
  5488. case NEON::BI__builtin_neon_vst2_v:
  5489. case NEON::BI__builtin_neon_vst2q_v: {
  5490. Ops.push_back(Ops[0]);
  5491. Ops.erase(Ops.begin());
  5492. llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
  5493. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
  5494. Ops, "");
  5495. }
  5496. case NEON::BI__builtin_neon_vst2_lane_v:
  5497. case NEON::BI__builtin_neon_vst2q_lane_v: {
  5498. Ops.push_back(Ops[0]);
  5499. Ops.erase(Ops.begin());
  5500. Ops[2] = Builder.CreateZExt(Ops[2],
  5501. llvm::IntegerType::get(getLLVMContext(), 64));
  5502. llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
  5503. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
  5504. Ops, "");
  5505. }
  5506. case NEON::BI__builtin_neon_vst3_v:
  5507. case NEON::BI__builtin_neon_vst3q_v: {
  5508. Ops.push_back(Ops[0]);
  5509. Ops.erase(Ops.begin());
  5510. llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
  5511. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
  5512. Ops, "");
  5513. }
  5514. case NEON::BI__builtin_neon_vst3_lane_v:
  5515. case NEON::BI__builtin_neon_vst3q_lane_v: {
  5516. Ops.push_back(Ops[0]);
  5517. Ops.erase(Ops.begin());
  5518. Ops[3] = Builder.CreateZExt(Ops[3],
  5519. llvm::IntegerType::get(getLLVMContext(), 64));
  5520. llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
  5521. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
  5522. Ops, "");
  5523. }
  5524. case NEON::BI__builtin_neon_vst4_v:
  5525. case NEON::BI__builtin_neon_vst4q_v: {
  5526. Ops.push_back(Ops[0]);
  5527. Ops.erase(Ops.begin());
  5528. llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
  5529. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
  5530. Ops, "");
  5531. }
  5532. case NEON::BI__builtin_neon_vst4_lane_v:
  5533. case NEON::BI__builtin_neon_vst4q_lane_v: {
  5534. Ops.push_back(Ops[0]);
  5535. Ops.erase(Ops.begin());
  5536. Ops[4] = Builder.CreateZExt(Ops[4],
  5537. llvm::IntegerType::get(getLLVMContext(), 64));
  5538. llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
  5539. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
  5540. Ops, "");
  5541. }
  5542. case NEON::BI__builtin_neon_vtrn_v:
  5543. case NEON::BI__builtin_neon_vtrnq_v: {
  5544. Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
  5545. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  5546. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  5547. Value *SV = nullptr;
  5548. for (unsigned vi = 0; vi != 2; ++vi) {
  5549. SmallVector<Constant*, 16> Indices;
  5550. for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
  5551. Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
  5552. Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
  5553. }
  5554. Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
  5555. SV = llvm::ConstantVector::get(Indices);
  5556. SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
  5557. SV = Builder.CreateStore(SV, Addr);
  5558. }
  5559. return SV;
  5560. }
  5561. case NEON::BI__builtin_neon_vuzp_v:
  5562. case NEON::BI__builtin_neon_vuzpq_v: {
  5563. Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
  5564. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  5565. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  5566. Value *SV = nullptr;
  5567. for (unsigned vi = 0; vi != 2; ++vi) {
  5568. SmallVector<Constant*, 16> Indices;
  5569. for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
  5570. Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
  5571. Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
  5572. SV = llvm::ConstantVector::get(Indices);
  5573. SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
  5574. SV = Builder.CreateStore(SV, Addr);
  5575. }
  5576. return SV;
  5577. }
  5578. case NEON::BI__builtin_neon_vzip_v:
  5579. case NEON::BI__builtin_neon_vzipq_v: {
  5580. Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
  5581. Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  5582. Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  5583. Value *SV = nullptr;
  5584. for (unsigned vi = 0; vi != 2; ++vi) {
  5585. SmallVector<Constant*, 16> Indices;
  5586. for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
  5587. Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
  5588. Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
  5589. }
  5590. Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
  5591. SV = llvm::ConstantVector::get(Indices);
  5592. SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
  5593. SV = Builder.CreateStore(SV, Addr);
  5594. }
  5595. return SV;
  5596. }
  5597. case NEON::BI__builtin_neon_vqtbl1q_v: {
  5598. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
  5599. Ops, "vtbl1");
  5600. }
  5601. case NEON::BI__builtin_neon_vqtbl2q_v: {
  5602. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
  5603. Ops, "vtbl2");
  5604. }
  5605. case NEON::BI__builtin_neon_vqtbl3q_v: {
  5606. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
  5607. Ops, "vtbl3");
  5608. }
  5609. case NEON::BI__builtin_neon_vqtbl4q_v: {
  5610. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
  5611. Ops, "vtbl4");
  5612. }
  5613. case NEON::BI__builtin_neon_vqtbx1q_v: {
  5614. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
  5615. Ops, "vtbx1");
  5616. }
  5617. case NEON::BI__builtin_neon_vqtbx2q_v: {
  5618. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
  5619. Ops, "vtbx2");
  5620. }
  5621. case NEON::BI__builtin_neon_vqtbx3q_v: {
  5622. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
  5623. Ops, "vtbx3");
  5624. }
  5625. case NEON::BI__builtin_neon_vqtbx4q_v: {
  5626. return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
  5627. Ops, "vtbx4");
  5628. }
  5629. case NEON::BI__builtin_neon_vsqadd_v:
  5630. case NEON::BI__builtin_neon_vsqaddq_v: {
  5631. Int = Intrinsic::aarch64_neon_usqadd;
  5632. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
  5633. }
  5634. case NEON::BI__builtin_neon_vuqadd_v:
  5635. case NEON::BI__builtin_neon_vuqaddq_v: {
  5636. Int = Intrinsic::aarch64_neon_suqadd;
  5637. return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
  5638. }
  5639. }
  5640. }
  5641. llvm::Value *CodeGenFunction::
  5642. BuildVector(ArrayRef<llvm::Value*> Ops) {
  5643. assert((Ops.size() & (Ops.size() - 1)) == 0 &&
  5644. "Not a power-of-two sized vector!");
  5645. bool AllConstants = true;
  5646. for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
  5647. AllConstants &= isa<Constant>(Ops[i]);
  5648. // If this is a constant vector, create a ConstantVector.
  5649. if (AllConstants) {
  5650. SmallVector<llvm::Constant*, 16> CstOps;
  5651. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  5652. CstOps.push_back(cast<Constant>(Ops[i]));
  5653. return llvm::ConstantVector::get(CstOps);
  5654. }
  5655. // Otherwise, insertelement the values to build the vector.
  5656. Value *Result =
  5657. llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
  5658. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  5659. Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
  5660. return Result;
  5661. }
  5662. Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
  5663. const CallExpr *E) {
  5664. SmallVector<Value*, 4> Ops;
  5665. // Find out if any arguments are required to be integer constant expressions.
  5666. unsigned ICEArguments = 0;
  5667. ASTContext::GetBuiltinTypeError Error;
  5668. getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  5669. assert(Error == ASTContext::GE_None && "Should not codegen an error");
  5670. for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
  5671. // If this is a normal argument, just emit it as a scalar.
  5672. if ((ICEArguments & (1 << i)) == 0) {
  5673. Ops.push_back(EmitScalarExpr(E->getArg(i)));
  5674. continue;
  5675. }
  5676. // If this is required to be a constant, constant fold it so that we know
  5677. // that the generated intrinsic gets a ConstantInt.
  5678. llvm::APSInt Result;
  5679. bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
  5680. assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
  5681. Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  5682. }
  5683. switch (BuiltinID) {
  5684. default: return nullptr;
  5685. case X86::BI__builtin_cpu_supports: {
  5686. const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
  5687. StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
  5688. // TODO: When/if this becomes more than x86 specific then use a TargetInfo
  5689. // based mapping.
  5690. // Processor features and mapping to processor feature value.
  5691. enum X86Features {
  5692. CMOV = 0,
  5693. MMX,
  5694. POPCNT,
  5695. SSE,
  5696. SSE2,
  5697. SSE3,
  5698. SSSE3,
  5699. SSE4_1,
  5700. SSE4_2,
  5701. AVX,
  5702. AVX2,
  5703. SSE4_A,
  5704. FMA4,
  5705. XOP,
  5706. FMA,
  5707. AVX512F,
  5708. BMI,
  5709. BMI2,
  5710. MAX
  5711. };
  5712. X86Features Feature = StringSwitch<X86Features>(FeatureStr)
  5713. .Case("cmov", X86Features::CMOV)
  5714. .Case("mmx", X86Features::MMX)
  5715. .Case("popcnt", X86Features::POPCNT)
  5716. .Case("sse", X86Features::SSE)
  5717. .Case("sse2", X86Features::SSE2)
  5718. .Case("sse3", X86Features::SSE3)
  5719. .Case("sse4.1", X86Features::SSE4_1)
  5720. .Case("sse4.2", X86Features::SSE4_2)
  5721. .Case("avx", X86Features::AVX)
  5722. .Case("avx2", X86Features::AVX2)
  5723. .Case("sse4a", X86Features::SSE4_A)
  5724. .Case("fma4", X86Features::FMA4)
  5725. .Case("xop", X86Features::XOP)
  5726. .Case("fma", X86Features::FMA)
  5727. .Case("avx512f", X86Features::AVX512F)
  5728. .Case("bmi", X86Features::BMI)
  5729. .Case("bmi2", X86Features::BMI2)
  5730. .Default(X86Features::MAX);
  5731. assert(Feature != X86Features::MAX && "Invalid feature!");
  5732. // Matching the struct layout from the compiler-rt/libgcc structure that is
  5733. // filled in:
  5734. // unsigned int __cpu_vendor;
  5735. // unsigned int __cpu_type;
  5736. // unsigned int __cpu_subtype;
  5737. // unsigned int __cpu_features[1];
  5738. llvm::Type *STy = llvm::StructType::get(
  5739. Int32Ty, Int32Ty, Int32Ty, llvm::ArrayType::get(Int32Ty, 1), nullptr);
  5740. // Grab the global __cpu_model.
  5741. llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
  5742. // Grab the first (0th) element from the field __cpu_features off of the
  5743. // global in the struct STy.
  5744. Value *Idxs[] = {
  5745. ConstantInt::get(Int32Ty, 0),
  5746. ConstantInt::get(Int32Ty, 3),
  5747. ConstantInt::get(Int32Ty, 0)
  5748. };
  5749. Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
  5750. Value *Features = Builder.CreateLoad(CpuFeatures);
  5751. // Check the value of the bit corresponding to the feature requested.
  5752. Value *Bitset = Builder.CreateAnd(
  5753. Features, llvm::ConstantInt::get(Int32Ty, 1 << Feature));
  5754. return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0));
  5755. }
  5756. case X86::BI_mm_prefetch: {
  5757. Value *Address = EmitScalarExpr(E->getArg(0));
  5758. Value *RW = ConstantInt::get(Int32Ty, 0);
  5759. Value *Locality = EmitScalarExpr(E->getArg(1));
  5760. Value *Data = ConstantInt::get(Int32Ty, 1);
  5761. Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
  5762. return Builder.CreateCall(F, {Address, RW, Locality, Data});
  5763. }
  5764. case X86::BI__builtin_ia32_vec_init_v8qi:
  5765. case X86::BI__builtin_ia32_vec_init_v4hi:
  5766. case X86::BI__builtin_ia32_vec_init_v2si:
  5767. return Builder.CreateBitCast(BuildVector(Ops),
  5768. llvm::Type::getX86_MMXTy(getLLVMContext()));
  5769. case X86::BI__builtin_ia32_vec_ext_v2si:
  5770. return Builder.CreateExtractElement(Ops[0],
  5771. llvm::ConstantInt::get(Ops[1]->getType(), 0));
  5772. case X86::BI__builtin_ia32_ldmxcsr: {
  5773. Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
  5774. Builder.CreateStore(Ops[0], Tmp);
  5775. return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
  5776. Builder.CreateBitCast(Tmp, Int8PtrTy));
  5777. }
  5778. case X86::BI__builtin_ia32_stmxcsr: {
  5779. Value *Tmp = CreateMemTemp(E->getType());
  5780. Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
  5781. Builder.CreateBitCast(Tmp, Int8PtrTy));
  5782. return Builder.CreateLoad(Tmp, "stmxcsr");
  5783. }
  5784. case X86::BI__builtin_ia32_storehps:
  5785. case X86::BI__builtin_ia32_storelps: {
  5786. llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
  5787. llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
  5788. // cast val v2i64
  5789. Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
  5790. // extract (0, 1)
  5791. unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
  5792. llvm::Value *Idx = llvm::ConstantInt::get(SizeTy, Index);
  5793. Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
  5794. // cast pointer to i64 & store
  5795. Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
  5796. return Builder.CreateStore(Ops[1], Ops[0]);
  5797. }
  5798. case X86::BI__builtin_ia32_palignr128:
  5799. case X86::BI__builtin_ia32_palignr256: {
  5800. unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
  5801. unsigned NumElts =
  5802. cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
  5803. assert(NumElts % 16 == 0);
  5804. unsigned NumLanes = NumElts / 16;
  5805. unsigned NumLaneElts = NumElts / NumLanes;
  5806. // If palignr is shifting the pair of vectors more than the size of two
  5807. // lanes, emit zero.
  5808. if (ShiftVal >= (2 * NumLaneElts))
  5809. return llvm::Constant::getNullValue(ConvertType(E->getType()));
  5810. // If palignr is shifting the pair of input vectors more than one lane,
  5811. // but less than two lanes, convert to shifting in zeroes.
  5812. if (ShiftVal > NumLaneElts) {
  5813. ShiftVal -= NumLaneElts;
  5814. Ops[1] = Ops[0];
  5815. Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
  5816. }
  5817. SmallVector<llvm::Constant*, 32> Indices;
  5818. // 256-bit palignr operates on 128-bit lanes so we need to handle that
  5819. for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
  5820. for (unsigned i = 0; i != NumLaneElts; ++i) {
  5821. unsigned Idx = ShiftVal + i;
  5822. if (Idx >= NumLaneElts)
  5823. Idx += NumElts - NumLaneElts; // End of lane, switch operand.
  5824. Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
  5825. }
  5826. }
  5827. Value* SV = llvm::ConstantVector::get(Indices);
  5828. return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
  5829. }
  5830. case X86::BI__builtin_ia32_pslldqi256: {
  5831. // Shift value is in bits so divide by 8.
  5832. unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3;
  5833. // If pslldq is shifting the vector more than 15 bytes, emit zero.
  5834. if (shiftVal >= 16)
  5835. return llvm::Constant::getNullValue(ConvertType(E->getType()));
  5836. SmallVector<llvm::Constant*, 32> Indices;
  5837. // 256-bit pslldq operates on 128-bit lanes so we need to handle that
  5838. for (unsigned l = 0; l != 32; l += 16) {
  5839. for (unsigned i = 0; i != 16; ++i) {
  5840. unsigned Idx = 32 + i - shiftVal;
  5841. if (Idx < 32) Idx -= 16; // end of lane, switch operand.
  5842. Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
  5843. }
  5844. }
  5845. llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32);
  5846. Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
  5847. Value *Zero = llvm::Constant::getNullValue(VecTy);
  5848. Value *SV = llvm::ConstantVector::get(Indices);
  5849. SV = Builder.CreateShuffleVector(Zero, Ops[0], SV, "pslldq");
  5850. llvm::Type *ResultType = ConvertType(E->getType());
  5851. return Builder.CreateBitCast(SV, ResultType, "cast");
  5852. }
  5853. case X86::BI__builtin_ia32_psrldqi256: {
  5854. // Shift value is in bits so divide by 8.
  5855. unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3;
  5856. // If psrldq is shifting the vector more than 15 bytes, emit zero.
  5857. if (shiftVal >= 16)
  5858. return llvm::Constant::getNullValue(ConvertType(E->getType()));
  5859. SmallVector<llvm::Constant*, 32> Indices;
  5860. // 256-bit psrldq operates on 128-bit lanes so we need to handle that
  5861. for (unsigned l = 0; l != 32; l += 16) {
  5862. for (unsigned i = 0; i != 16; ++i) {
  5863. unsigned Idx = i + shiftVal;
  5864. if (Idx >= 16) Idx += 16; // end of lane, switch operand.
  5865. Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
  5866. }
  5867. }
  5868. llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32);
  5869. Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
  5870. Value *Zero = llvm::Constant::getNullValue(VecTy);
  5871. Value *SV = llvm::ConstantVector::get(Indices);
  5872. SV = Builder.CreateShuffleVector(Ops[0], Zero, SV, "psrldq");
  5873. llvm::Type *ResultType = ConvertType(E->getType());
  5874. return Builder.CreateBitCast(SV, ResultType, "cast");
  5875. }
  5876. case X86::BI__builtin_ia32_movntps:
  5877. case X86::BI__builtin_ia32_movntps256:
  5878. case X86::BI__builtin_ia32_movntpd:
  5879. case X86::BI__builtin_ia32_movntpd256:
  5880. case X86::BI__builtin_ia32_movntdq:
  5881. case X86::BI__builtin_ia32_movntdq256:
  5882. case X86::BI__builtin_ia32_movnti:
  5883. case X86::BI__builtin_ia32_movnti64: {
  5884. llvm::MDNode *Node = llvm::MDNode::get(
  5885. getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
  5886. // Convert the type of the pointer to a pointer to the stored type.
  5887. Value *BC = Builder.CreateBitCast(Ops[0],
  5888. llvm::PointerType::getUnqual(Ops[1]->getType()),
  5889. "cast");
  5890. StoreInst *SI = Builder.CreateStore(Ops[1], BC);
  5891. SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
  5892. // If the operand is an integer, we can't assume alignment. Otherwise,
  5893. // assume natural alignment.
  5894. QualType ArgTy = E->getArg(1)->getType();
  5895. unsigned Align;
  5896. if (ArgTy->isIntegerType())
  5897. Align = 1;
  5898. else
  5899. Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
  5900. SI->setAlignment(Align);
  5901. return SI;
  5902. }
  5903. // 3DNow!
  5904. case X86::BI__builtin_ia32_pswapdsf:
  5905. case X86::BI__builtin_ia32_pswapdsi: {
  5906. llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
  5907. Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
  5908. llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
  5909. return Builder.CreateCall(F, Ops, "pswapd");
  5910. }
  5911. case X86::BI__builtin_ia32_rdrand16_step:
  5912. case X86::BI__builtin_ia32_rdrand32_step:
  5913. case X86::BI__builtin_ia32_rdrand64_step:
  5914. case X86::BI__builtin_ia32_rdseed16_step:
  5915. case X86::BI__builtin_ia32_rdseed32_step:
  5916. case X86::BI__builtin_ia32_rdseed64_step: {
  5917. Intrinsic::ID ID;
  5918. switch (BuiltinID) {
  5919. default: llvm_unreachable("Unsupported intrinsic!");
  5920. case X86::BI__builtin_ia32_rdrand16_step:
  5921. ID = Intrinsic::x86_rdrand_16;
  5922. break;
  5923. case X86::BI__builtin_ia32_rdrand32_step:
  5924. ID = Intrinsic::x86_rdrand_32;
  5925. break;
  5926. case X86::BI__builtin_ia32_rdrand64_step:
  5927. ID = Intrinsic::x86_rdrand_64;
  5928. break;
  5929. case X86::BI__builtin_ia32_rdseed16_step:
  5930. ID = Intrinsic::x86_rdseed_16;
  5931. break;
  5932. case X86::BI__builtin_ia32_rdseed32_step:
  5933. ID = Intrinsic::x86_rdseed_32;
  5934. break;
  5935. case X86::BI__builtin_ia32_rdseed64_step:
  5936. ID = Intrinsic::x86_rdseed_64;
  5937. break;
  5938. }
  5939. Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
  5940. Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
  5941. return Builder.CreateExtractValue(Call, 1);
  5942. }
  5943. // SSE comparison intrisics
  5944. case X86::BI__builtin_ia32_cmpeqps:
  5945. case X86::BI__builtin_ia32_cmpltps:
  5946. case X86::BI__builtin_ia32_cmpleps:
  5947. case X86::BI__builtin_ia32_cmpunordps:
  5948. case X86::BI__builtin_ia32_cmpneqps:
  5949. case X86::BI__builtin_ia32_cmpnltps:
  5950. case X86::BI__builtin_ia32_cmpnleps:
  5951. case X86::BI__builtin_ia32_cmpordps:
  5952. case X86::BI__builtin_ia32_cmpeqss:
  5953. case X86::BI__builtin_ia32_cmpltss:
  5954. case X86::BI__builtin_ia32_cmpless:
  5955. case X86::BI__builtin_ia32_cmpunordss:
  5956. case X86::BI__builtin_ia32_cmpneqss:
  5957. case X86::BI__builtin_ia32_cmpnltss:
  5958. case X86::BI__builtin_ia32_cmpnless:
  5959. case X86::BI__builtin_ia32_cmpordss:
  5960. case X86::BI__builtin_ia32_cmpeqpd:
  5961. case X86::BI__builtin_ia32_cmpltpd:
  5962. case X86::BI__builtin_ia32_cmplepd:
  5963. case X86::BI__builtin_ia32_cmpunordpd:
  5964. case X86::BI__builtin_ia32_cmpneqpd:
  5965. case X86::BI__builtin_ia32_cmpnltpd:
  5966. case X86::BI__builtin_ia32_cmpnlepd:
  5967. case X86::BI__builtin_ia32_cmpordpd:
  5968. case X86::BI__builtin_ia32_cmpeqsd:
  5969. case X86::BI__builtin_ia32_cmpltsd:
  5970. case X86::BI__builtin_ia32_cmplesd:
  5971. case X86::BI__builtin_ia32_cmpunordsd:
  5972. case X86::BI__builtin_ia32_cmpneqsd:
  5973. case X86::BI__builtin_ia32_cmpnltsd:
  5974. case X86::BI__builtin_ia32_cmpnlesd:
  5975. case X86::BI__builtin_ia32_cmpordsd:
  5976. // These exist so that the builtin that takes an immediate can be bounds
  5977. // checked by clang to avoid passing bad immediates to the backend. Since
  5978. // AVX has a larger immediate than SSE we would need separate builtins to
  5979. // do the different bounds checking. Rather than create a clang specific
  5980. // SSE only builtin, this implements eight separate builtins to match gcc
  5981. // implementation.
  5982. // Choose the immediate.
  5983. unsigned Imm;
  5984. switch (BuiltinID) {
  5985. default: llvm_unreachable("Unsupported intrinsic!");
  5986. case X86::BI__builtin_ia32_cmpeqps:
  5987. case X86::BI__builtin_ia32_cmpeqss:
  5988. case X86::BI__builtin_ia32_cmpeqpd:
  5989. case X86::BI__builtin_ia32_cmpeqsd:
  5990. Imm = 0;
  5991. break;
  5992. case X86::BI__builtin_ia32_cmpltps:
  5993. case X86::BI__builtin_ia32_cmpltss:
  5994. case X86::BI__builtin_ia32_cmpltpd:
  5995. case X86::BI__builtin_ia32_cmpltsd:
  5996. Imm = 1;
  5997. break;
  5998. case X86::BI__builtin_ia32_cmpleps:
  5999. case X86::BI__builtin_ia32_cmpless:
  6000. case X86::BI__builtin_ia32_cmplepd:
  6001. case X86::BI__builtin_ia32_cmplesd:
  6002. Imm = 2;
  6003. break;
  6004. case X86::BI__builtin_ia32_cmpunordps:
  6005. case X86::BI__builtin_ia32_cmpunordss:
  6006. case X86::BI__builtin_ia32_cmpunordpd:
  6007. case X86::BI__builtin_ia32_cmpunordsd:
  6008. Imm = 3;
  6009. break;
  6010. case X86::BI__builtin_ia32_cmpneqps:
  6011. case X86::BI__builtin_ia32_cmpneqss:
  6012. case X86::BI__builtin_ia32_cmpneqpd:
  6013. case X86::BI__builtin_ia32_cmpneqsd:
  6014. Imm = 4;
  6015. break;
  6016. case X86::BI__builtin_ia32_cmpnltps:
  6017. case X86::BI__builtin_ia32_cmpnltss:
  6018. case X86::BI__builtin_ia32_cmpnltpd:
  6019. case X86::BI__builtin_ia32_cmpnltsd:
  6020. Imm = 5;
  6021. break;
  6022. case X86::BI__builtin_ia32_cmpnleps:
  6023. case X86::BI__builtin_ia32_cmpnless:
  6024. case X86::BI__builtin_ia32_cmpnlepd:
  6025. case X86::BI__builtin_ia32_cmpnlesd:
  6026. Imm = 6;
  6027. break;
  6028. case X86::BI__builtin_ia32_cmpordps:
  6029. case X86::BI__builtin_ia32_cmpordss:
  6030. case X86::BI__builtin_ia32_cmpordpd:
  6031. case X86::BI__builtin_ia32_cmpordsd:
  6032. Imm = 7;
  6033. break;
  6034. }
  6035. // Choose the intrinsic ID.
  6036. const char *name;
  6037. Intrinsic::ID ID;
  6038. switch (BuiltinID) {
  6039. default: llvm_unreachable("Unsupported intrinsic!");
  6040. case X86::BI__builtin_ia32_cmpeqps:
  6041. case X86::BI__builtin_ia32_cmpltps:
  6042. case X86::BI__builtin_ia32_cmpleps:
  6043. case X86::BI__builtin_ia32_cmpunordps:
  6044. case X86::BI__builtin_ia32_cmpneqps:
  6045. case X86::BI__builtin_ia32_cmpnltps:
  6046. case X86::BI__builtin_ia32_cmpnleps:
  6047. case X86::BI__builtin_ia32_cmpordps:
  6048. name = "cmpps";
  6049. ID = Intrinsic::x86_sse_cmp_ps;
  6050. break;
  6051. case X86::BI__builtin_ia32_cmpeqss:
  6052. case X86::BI__builtin_ia32_cmpltss:
  6053. case X86::BI__builtin_ia32_cmpless:
  6054. case X86::BI__builtin_ia32_cmpunordss:
  6055. case X86::BI__builtin_ia32_cmpneqss:
  6056. case X86::BI__builtin_ia32_cmpnltss:
  6057. case X86::BI__builtin_ia32_cmpnless:
  6058. case X86::BI__builtin_ia32_cmpordss:
  6059. name = "cmpss";
  6060. ID = Intrinsic::x86_sse_cmp_ss;
  6061. break;
  6062. case X86::BI__builtin_ia32_cmpeqpd:
  6063. case X86::BI__builtin_ia32_cmpltpd:
  6064. case X86::BI__builtin_ia32_cmplepd:
  6065. case X86::BI__builtin_ia32_cmpunordpd:
  6066. case X86::BI__builtin_ia32_cmpneqpd:
  6067. case X86::BI__builtin_ia32_cmpnltpd:
  6068. case X86::BI__builtin_ia32_cmpnlepd:
  6069. case X86::BI__builtin_ia32_cmpordpd:
  6070. name = "cmppd";
  6071. ID = Intrinsic::x86_sse2_cmp_pd;
  6072. break;
  6073. case X86::BI__builtin_ia32_cmpeqsd:
  6074. case X86::BI__builtin_ia32_cmpltsd:
  6075. case X86::BI__builtin_ia32_cmplesd:
  6076. case X86::BI__builtin_ia32_cmpunordsd:
  6077. case X86::BI__builtin_ia32_cmpneqsd:
  6078. case X86::BI__builtin_ia32_cmpnltsd:
  6079. case X86::BI__builtin_ia32_cmpnlesd:
  6080. case X86::BI__builtin_ia32_cmpordsd:
  6081. name = "cmpsd";
  6082. ID = Intrinsic::x86_sse2_cmp_sd;
  6083. break;
  6084. }
  6085. Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
  6086. llvm::Function *F = CGM.getIntrinsic(ID);
  6087. return Builder.CreateCall(F, Ops, name);
  6088. }
  6089. }
  6090. Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
  6091. const CallExpr *E) {
  6092. SmallVector<Value*, 4> Ops;
  6093. for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
  6094. Ops.push_back(EmitScalarExpr(E->getArg(i)));
  6095. Intrinsic::ID ID = Intrinsic::not_intrinsic;
  6096. switch (BuiltinID) {
  6097. default: return nullptr;
  6098. // vec_ld, vec_lvsl, vec_lvsr
  6099. case PPC::BI__builtin_altivec_lvx:
  6100. case PPC::BI__builtin_altivec_lvxl:
  6101. case PPC::BI__builtin_altivec_lvebx:
  6102. case PPC::BI__builtin_altivec_lvehx:
  6103. case PPC::BI__builtin_altivec_lvewx:
  6104. case PPC::BI__builtin_altivec_lvsl:
  6105. case PPC::BI__builtin_altivec_lvsr:
  6106. case PPC::BI__builtin_vsx_lxvd2x:
  6107. case PPC::BI__builtin_vsx_lxvw4x:
  6108. {
  6109. Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
  6110. Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
  6111. Ops.pop_back();
  6112. switch (BuiltinID) {
  6113. default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
  6114. case PPC::BI__builtin_altivec_lvx:
  6115. ID = Intrinsic::ppc_altivec_lvx;
  6116. break;
  6117. case PPC::BI__builtin_altivec_lvxl:
  6118. ID = Intrinsic::ppc_altivec_lvxl;
  6119. break;
  6120. case PPC::BI__builtin_altivec_lvebx:
  6121. ID = Intrinsic::ppc_altivec_lvebx;
  6122. break;
  6123. case PPC::BI__builtin_altivec_lvehx:
  6124. ID = Intrinsic::ppc_altivec_lvehx;
  6125. break;
  6126. case PPC::BI__builtin_altivec_lvewx:
  6127. ID = Intrinsic::ppc_altivec_lvewx;
  6128. break;
  6129. case PPC::BI__builtin_altivec_lvsl:
  6130. ID = Intrinsic::ppc_altivec_lvsl;
  6131. break;
  6132. case PPC::BI__builtin_altivec_lvsr:
  6133. ID = Intrinsic::ppc_altivec_lvsr;
  6134. break;
  6135. case PPC::BI__builtin_vsx_lxvd2x:
  6136. ID = Intrinsic::ppc_vsx_lxvd2x;
  6137. break;
  6138. case PPC::BI__builtin_vsx_lxvw4x:
  6139. ID = Intrinsic::ppc_vsx_lxvw4x;
  6140. break;
  6141. }
  6142. llvm::Function *F = CGM.getIntrinsic(ID);
  6143. return Builder.CreateCall(F, Ops, "");
  6144. }
  6145. // vec_st
  6146. case PPC::BI__builtin_altivec_stvx:
  6147. case PPC::BI__builtin_altivec_stvxl:
  6148. case PPC::BI__builtin_altivec_stvebx:
  6149. case PPC::BI__builtin_altivec_stvehx:
  6150. case PPC::BI__builtin_altivec_stvewx:
  6151. case PPC::BI__builtin_vsx_stxvd2x:
  6152. case PPC::BI__builtin_vsx_stxvw4x:
  6153. {
  6154. Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
  6155. Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
  6156. Ops.pop_back();
  6157. switch (BuiltinID) {
  6158. default: llvm_unreachable("Unsupported st intrinsic!");
  6159. case PPC::BI__builtin_altivec_stvx:
  6160. ID = Intrinsic::ppc_altivec_stvx;
  6161. break;
  6162. case PPC::BI__builtin_altivec_stvxl:
  6163. ID = Intrinsic::ppc_altivec_stvxl;
  6164. break;
  6165. case PPC::BI__builtin_altivec_stvebx:
  6166. ID = Intrinsic::ppc_altivec_stvebx;
  6167. break;
  6168. case PPC::BI__builtin_altivec_stvehx:
  6169. ID = Intrinsic::ppc_altivec_stvehx;
  6170. break;
  6171. case PPC::BI__builtin_altivec_stvewx:
  6172. ID = Intrinsic::ppc_altivec_stvewx;
  6173. break;
  6174. case PPC::BI__builtin_vsx_stxvd2x:
  6175. ID = Intrinsic::ppc_vsx_stxvd2x;
  6176. break;
  6177. case PPC::BI__builtin_vsx_stxvw4x:
  6178. ID = Intrinsic::ppc_vsx_stxvw4x;
  6179. break;
  6180. }
  6181. llvm::Function *F = CGM.getIntrinsic(ID);
  6182. return Builder.CreateCall(F, Ops, "");
  6183. }
  6184. // Square root
  6185. case PPC::BI__builtin_vsx_xvsqrtsp:
  6186. case PPC::BI__builtin_vsx_xvsqrtdp: {
  6187. llvm::Type *ResultType = ConvertType(E->getType());
  6188. Value *X = EmitScalarExpr(E->getArg(0));
  6189. ID = Intrinsic::sqrt;
  6190. llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
  6191. return Builder.CreateCall(F, X);
  6192. }
  6193. // Count leading zeros
  6194. case PPC::BI__builtin_altivec_vclzb:
  6195. case PPC::BI__builtin_altivec_vclzh:
  6196. case PPC::BI__builtin_altivec_vclzw:
  6197. case PPC::BI__builtin_altivec_vclzd: {
  6198. llvm::Type *ResultType = ConvertType(E->getType());
  6199. Value *X = EmitScalarExpr(E->getArg(0));
  6200. Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
  6201. Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
  6202. return Builder.CreateCall(F, {X, Undef});
  6203. }
  6204. // Copy sign
  6205. case PPC::BI__builtin_vsx_xvcpsgnsp:
  6206. case PPC::BI__builtin_vsx_xvcpsgndp: {
  6207. llvm::Type *ResultType = ConvertType(E->getType());
  6208. Value *X = EmitScalarExpr(E->getArg(0));
  6209. Value *Y = EmitScalarExpr(E->getArg(1));
  6210. ID = Intrinsic::copysign;
  6211. llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
  6212. return Builder.CreateCall(F, {X, Y});
  6213. }
  6214. // Rounding/truncation
  6215. case PPC::BI__builtin_vsx_xvrspip:
  6216. case PPC::BI__builtin_vsx_xvrdpip:
  6217. case PPC::BI__builtin_vsx_xvrdpim:
  6218. case PPC::BI__builtin_vsx_xvrspim:
  6219. case PPC::BI__builtin_vsx_xvrdpi:
  6220. case PPC::BI__builtin_vsx_xvrspi:
  6221. case PPC::BI__builtin_vsx_xvrdpic:
  6222. case PPC::BI__builtin_vsx_xvrspic:
  6223. case PPC::BI__builtin_vsx_xvrdpiz:
  6224. case PPC::BI__builtin_vsx_xvrspiz: {
  6225. llvm::Type *ResultType = ConvertType(E->getType());
  6226. Value *X = EmitScalarExpr(E->getArg(0));
  6227. if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
  6228. BuiltinID == PPC::BI__builtin_vsx_xvrspim)
  6229. ID = Intrinsic::floor;
  6230. else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
  6231. BuiltinID == PPC::BI__builtin_vsx_xvrspi)
  6232. ID = Intrinsic::round;
  6233. else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
  6234. BuiltinID == PPC::BI__builtin_vsx_xvrspic)
  6235. ID = Intrinsic::nearbyint;
  6236. else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
  6237. BuiltinID == PPC::BI__builtin_vsx_xvrspip)
  6238. ID = Intrinsic::ceil;
  6239. else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
  6240. BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
  6241. ID = Intrinsic::trunc;
  6242. llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
  6243. return Builder.CreateCall(F, X);
  6244. }
  6245. // FMA variations
  6246. case PPC::BI__builtin_vsx_xvmaddadp:
  6247. case PPC::BI__builtin_vsx_xvmaddasp:
  6248. case PPC::BI__builtin_vsx_xvnmaddadp:
  6249. case PPC::BI__builtin_vsx_xvnmaddasp:
  6250. case PPC::BI__builtin_vsx_xvmsubadp:
  6251. case PPC::BI__builtin_vsx_xvmsubasp:
  6252. case PPC::BI__builtin_vsx_xvnmsubadp:
  6253. case PPC::BI__builtin_vsx_xvnmsubasp: {
  6254. llvm::Type *ResultType = ConvertType(E->getType());
  6255. Value *X = EmitScalarExpr(E->getArg(0));
  6256. Value *Y = EmitScalarExpr(E->getArg(1));
  6257. Value *Z = EmitScalarExpr(E->getArg(2));
  6258. Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
  6259. llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
  6260. switch (BuiltinID) {
  6261. case PPC::BI__builtin_vsx_xvmaddadp:
  6262. case PPC::BI__builtin_vsx_xvmaddasp:
  6263. return Builder.CreateCall(F, {X, Y, Z});
  6264. case PPC::BI__builtin_vsx_xvnmaddadp:
  6265. case PPC::BI__builtin_vsx_xvnmaddasp:
  6266. return Builder.CreateFSub(Zero,
  6267. Builder.CreateCall(F, {X, Y, Z}), "sub");
  6268. case PPC::BI__builtin_vsx_xvmsubadp:
  6269. case PPC::BI__builtin_vsx_xvmsubasp:
  6270. return Builder.CreateCall(F,
  6271. {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
  6272. case PPC::BI__builtin_vsx_xvnmsubadp:
  6273. case PPC::BI__builtin_vsx_xvnmsubasp:
  6274. Value *FsubRes =
  6275. Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
  6276. return Builder.CreateFSub(Zero, FsubRes, "sub");
  6277. }
  6278. llvm_unreachable("Unknown FMA operation");
  6279. return nullptr; // Suppress no-return warning
  6280. }
  6281. }
  6282. }
  6283. // Emit an intrinsic that has 1 float or double.
  6284. static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF,
  6285. const CallExpr *E,
  6286. unsigned IntrinsicID) {
  6287. llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  6288. Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  6289. return CGF.Builder.CreateCall(F, Src0);
  6290. }
  6291. // Emit an intrinsic that has 3 float or double operands.
  6292. static Value *emitTernaryFPBuiltin(CodeGenFunction &CGF,
  6293. const CallExpr *E,
  6294. unsigned IntrinsicID) {
  6295. llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  6296. llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  6297. llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
  6298. Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  6299. return CGF.Builder.CreateCall(F, {Src0, Src1, Src2});
  6300. }
  6301. // Emit an intrinsic that has 1 float or double operand, and 1 integer.
  6302. static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
  6303. const CallExpr *E,
  6304. unsigned IntrinsicID) {
  6305. llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  6306. llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  6307. Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  6308. return CGF.Builder.CreateCall(F, {Src0, Src1});
  6309. }
/// Emit IR for an AMDGPU builtin call.  Returns null for builtins that are
/// not handled here.
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgpu_div_scale:
  case AMDGPU::BI__builtin_amdgpu_div_scalef: {
    // Translate from the intrinsics's struct return to the builtin's out
    // argument.  The intrinsic returns a {result, flag} pair; the builtin
    // takes a pointer (arg 3) through which the flag is stored.

    // EmitPointerWithAlignment returns the pointer plus its alignment so the
    // flag store below can carry the correct alignment.
    std::pair<llvm::Value *, unsigned> FlagOutPtr
      = EmitPointerWithAlignment(E->getArg(3));

    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Z = EmitScalarExpr(E->getArg(2));

    llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::AMDGPU_div_scale,
                                           X->getType());

    llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});

    llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
    llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);

    // The flag comes back as an i1; widen it to whatever integer type the
    // out-pointer actually points at before storing.
    llvm::Type *RealFlagType
      = FlagOutPtr.first->getType()->getPointerElementType();

    llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
    llvm::StoreInst *FlagStore = Builder.CreateStore(FlagExt, FlagOutPtr.first);
    FlagStore->setAlignment(FlagOutPtr.second);
    return Result;
  }
  case AMDGPU::BI__builtin_amdgpu_div_fmas:
  case AMDGPU::BI__builtin_amdgpu_div_fmasf: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
    llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));

    llvm::Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_div_fmas,
                                      Src0->getType());
    // The intrinsic wants an i1 for its last operand; the builtin takes an
    // integer, so compare it against zero.
    llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
    return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
  }
  // The remaining builtins map 1:1 onto intrinsics via the small emit*
  // helpers above (unary FP, ternary FP, FP+int).
  case AMDGPU::BI__builtin_amdgpu_div_fixup:
  case AMDGPU::BI__builtin_amdgpu_div_fixupf:
    return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fixup);
  case AMDGPU::BI__builtin_amdgpu_trig_preop:
  case AMDGPU::BI__builtin_amdgpu_trig_preopf:
    return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_trig_preop);
  case AMDGPU::BI__builtin_amdgpu_rcp:
  case AMDGPU::BI__builtin_amdgpu_rcpf:
    return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rcp);
  case AMDGPU::BI__builtin_amdgpu_rsq:
  case AMDGPU::BI__builtin_amdgpu_rsqf:
    return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq);
  case AMDGPU::BI__builtin_amdgpu_rsq_clamped:
  case AMDGPU::BI__builtin_amdgpu_rsq_clampedf:
    return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq_clamped);
  case AMDGPU::BI__builtin_amdgpu_ldexp:
  case AMDGPU::BI__builtin_amdgpu_ldexpf:
    return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_ldexp);
  case AMDGPU::BI__builtin_amdgpu_class:
  case AMDGPU::BI__builtin_amdgpu_classf:
    return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_class);
  default:
    return nullptr;
  }
}
  6370. /// Handle a SystemZ function in which the final argument is a pointer
  6371. /// to an int that receives the post-instruction CC value. At the LLVM level
  6372. /// this is represented as a function that returns a {result, cc} pair.
  6373. static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
  6374. unsigned IntrinsicID,
  6375. const CallExpr *E) {
  6376. unsigned NumArgs = E->getNumArgs() - 1;
  6377. SmallVector<Value *, 8> Args(NumArgs);
  6378. for (unsigned I = 0; I < NumArgs; ++I)
  6379. Args[I] = CGF.EmitScalarExpr(E->getArg(I));
  6380. Value *CCPtr = CGF.EmitScalarExpr(E->getArg(NumArgs));
  6381. Value *F = CGF.CGM.getIntrinsic(IntrinsicID);
  6382. Value *Call = CGF.Builder.CreateCall(F, Args);
  6383. Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
  6384. CGF.Builder.CreateStore(CC, CCPtr);
  6385. return CGF.Builder.CreateExtractValue(Call, 0);
  6386. }
/// Emit IR for a SystemZ builtin call.  Returns null for builtins that are
/// not handled here.
Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  switch (BuiltinID) {
  case SystemZ::BI__builtin_tbegin: {
    Value *TDB = EmitScalarExpr(E->getArg(0));
    // 0xff0c: control mask passed to the tbegin intrinsic (constant here;
    // presumably the default transaction controls — see the s390 ISA docs).
    Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
    Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
    return Builder.CreateCall(F, {TDB, Control});
  }
  case SystemZ::BI__builtin_tbegin_nofloat: {
    // Same as tbegin above, but lowered to the _nofloat intrinsic variant.
    Value *TDB = EmitScalarExpr(E->getArg(0));
    Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
    Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
    return Builder.CreateCall(F, {TDB, Control});
  }
  case SystemZ::BI__builtin_tbeginc: {
    // tbeginc takes no TDB argument at the source level; pass a null TDB
    // pointer and a different control mask (0xff08).
    Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
    Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
    Value *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
    return Builder.CreateCall(F, {TDB, Control});
  }
  case SystemZ::BI__builtin_tabort: {
    // The intrinsic takes an i64 abort code; sign-extend the argument.
    Value *Data = EmitScalarExpr(E->getArg(0));
    Value *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
    return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
  }
  case SystemZ::BI__builtin_non_tx_store: {
    // Note the intrinsic's operand order is (data, address), the reverse of
    // the builtin's (address, data).
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Data = EmitScalarExpr(E->getArg(1));
    Value *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
    return Builder.CreateCall(F, {Data, Address});
  }

  // Vector builtins.  Note that most vector builtins are mapped automatically
  // to target-specific LLVM intrinsics.  The ones handled specially here can
  // be represented via standard LLVM IR, which is preferable to enable common
  // LLVM optimizations.

  // Population count -> llvm.ctpop.
  case SystemZ::BI__builtin_s390_vpopctb:
  case SystemZ::BI__builtin_s390_vpopcth:
  case SystemZ::BI__builtin_s390_vpopctf:
  case SystemZ::BI__builtin_s390_vpopctg: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
    return Builder.CreateCall(F, X);
  }

  // Count leading zeros -> llvm.ctlz with is_zero_undef = false.
  case SystemZ::BI__builtin_s390_vclzb:
  case SystemZ::BI__builtin_s390_vclzh:
  case SystemZ::BI__builtin_s390_vclzf:
  case SystemZ::BI__builtin_s390_vclzg: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
    return Builder.CreateCall(F, {X, Undef});
  }

  // Count trailing zeros -> llvm.cttz with is_zero_undef = false.
  case SystemZ::BI__builtin_s390_vctzb:
  case SystemZ::BI__builtin_s390_vctzh:
  case SystemZ::BI__builtin_s390_vctzf:
  case SystemZ::BI__builtin_s390_vctzg: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
    return Builder.CreateCall(F, {X, Undef});
  }

  // Square root -> llvm.sqrt.
  case SystemZ::BI__builtin_s390_vfsqdb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
    return Builder.CreateCall(F, X);
  }

  // Fused multiply-add -> llvm.fma.
  case SystemZ::BI__builtin_s390_vfmadb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    Value *Z = EmitScalarExpr(E->getArg(2));
    Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
    return Builder.CreateCall(F, {X, Y, Z});
  }

  // Fused multiply-subtract: fma(X, Y, -Z), with the negation expressed as
  // a subtraction from (negative) zero.
  case SystemZ::BI__builtin_s390_vfmsdb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    Value *Z = EmitScalarExpr(E->getArg(2));
    Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
    Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
    return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
  }

  // Absolute value -> llvm.fabs.
  case SystemZ::BI__builtin_s390_vflpdb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
    return Builder.CreateCall(F, X);
  }

  // Negated absolute value: -fabs(X).
  case SystemZ::BI__builtin_s390_vflndb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
    Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
    return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub");
  }

  case SystemZ::BI__builtin_s390_vfidb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    // Constant-fold the M4 and M5 mask arguments.
    llvm::APSInt M4, M5;
    bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
    bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
    assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
    (void)IsConstM4; (void)IsConstM5;
    // Check whether this instance of vfidb can be represented via a LLVM
    // standard intrinsic. We only support some combinations of M4 and M5.
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch (M4.getZExtValue()) {
    default: break;
    case 0:  // IEEE-inexact exception allowed
      switch (M5.getZExtValue()) {
      default: break;
      case 0: ID = Intrinsic::rint; break;
      }
      break;
    case 4:  // IEEE-inexact exception suppressed
      switch (M5.getZExtValue()) {
      default: break;
      case 0: ID = Intrinsic::nearbyint; break;
      case 1: ID = Intrinsic::round; break;
      case 5: ID = Intrinsic::trunc; break;
      case 6: ID = Intrinsic::ceil; break;
      case 7: ID = Intrinsic::floor; break;
      }
      break;
    }
    if (ID != Intrinsic::not_intrinsic) {
      Function *F = CGM.getIntrinsic(ID, ResultType);
      return Builder.CreateCall(F, X);
    }
    // No generic equivalent for this M4/M5 combination; fall back to the
    // target intrinsic with the masks passed through as constants.
    Function *F = CGM.getIntrinsic(Intrinsic::s390_vfidb);
    Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
    Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
    return Builder.CreateCall(F, {X, M4Value, M5Value});
  }

  // Vector intrinsics that output the post-instruction CC value.  Each case
  // expands to a call of EmitSystemZIntrinsicWithCC, which stores the CC
  // through the builtin's trailing pointer argument.

#define INTRINSIC_WITH_CC(NAME) \
    case SystemZ::BI__builtin_##NAME: \
      return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)

  INTRINSIC_WITH_CC(s390_vpkshs);
  INTRINSIC_WITH_CC(s390_vpksfs);
  INTRINSIC_WITH_CC(s390_vpksgs);

  INTRINSIC_WITH_CC(s390_vpklshs);
  INTRINSIC_WITH_CC(s390_vpklsfs);
  INTRINSIC_WITH_CC(s390_vpklsgs);

  INTRINSIC_WITH_CC(s390_vceqbs);
  INTRINSIC_WITH_CC(s390_vceqhs);
  INTRINSIC_WITH_CC(s390_vceqfs);
  INTRINSIC_WITH_CC(s390_vceqgs);

  INTRINSIC_WITH_CC(s390_vchbs);
  INTRINSIC_WITH_CC(s390_vchhs);
  INTRINSIC_WITH_CC(s390_vchfs);
  INTRINSIC_WITH_CC(s390_vchgs);

  INTRINSIC_WITH_CC(s390_vchlbs);
  INTRINSIC_WITH_CC(s390_vchlhs);
  INTRINSIC_WITH_CC(s390_vchlfs);
  INTRINSIC_WITH_CC(s390_vchlgs);

  INTRINSIC_WITH_CC(s390_vfaebs);
  INTRINSIC_WITH_CC(s390_vfaehs);
  INTRINSIC_WITH_CC(s390_vfaefs);

  INTRINSIC_WITH_CC(s390_vfaezbs);
  INTRINSIC_WITH_CC(s390_vfaezhs);
  INTRINSIC_WITH_CC(s390_vfaezfs);

  INTRINSIC_WITH_CC(s390_vfeebs);
  INTRINSIC_WITH_CC(s390_vfeehs);
  INTRINSIC_WITH_CC(s390_vfeefs);

  INTRINSIC_WITH_CC(s390_vfeezbs);
  INTRINSIC_WITH_CC(s390_vfeezhs);
  INTRINSIC_WITH_CC(s390_vfeezfs);

  INTRINSIC_WITH_CC(s390_vfenebs);
  INTRINSIC_WITH_CC(s390_vfenehs);
  INTRINSIC_WITH_CC(s390_vfenefs);

  INTRINSIC_WITH_CC(s390_vfenezbs);
  INTRINSIC_WITH_CC(s390_vfenezhs);
  INTRINSIC_WITH_CC(s390_vfenezfs);

  INTRINSIC_WITH_CC(s390_vistrbs);
  INTRINSIC_WITH_CC(s390_vistrhs);
  INTRINSIC_WITH_CC(s390_vistrfs);

  INTRINSIC_WITH_CC(s390_vstrcbs);
  INTRINSIC_WITH_CC(s390_vstrchs);
  INTRINSIC_WITH_CC(s390_vstrcfs);

  INTRINSIC_WITH_CC(s390_vstrczbs);
  INTRINSIC_WITH_CC(s390_vstrczhs);
  INTRINSIC_WITH_CC(s390_vstrczfs);

  INTRINSIC_WITH_CC(s390_vfcedbs);
  INTRINSIC_WITH_CC(s390_vfchdbs);
  INTRINSIC_WITH_CC(s390_vfchedbs);

  INTRINSIC_WITH_CC(s390_vftcidb);

#undef INTRINSIC_WITH_CC

  default:
    return nullptr;
  }
}
  6586. Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
  6587. const CallExpr *E) {
  6588. switch (BuiltinID) {
  6589. case NVPTX::BI__nvvm_atom_add_gen_i:
  6590. case NVPTX::BI__nvvm_atom_add_gen_l:
  6591. case NVPTX::BI__nvvm_atom_add_gen_ll:
  6592. return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
  6593. case NVPTX::BI__nvvm_atom_sub_gen_i:
  6594. case NVPTX::BI__nvvm_atom_sub_gen_l:
  6595. case NVPTX::BI__nvvm_atom_sub_gen_ll:
  6596. return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
  6597. case NVPTX::BI__nvvm_atom_and_gen_i:
  6598. case NVPTX::BI__nvvm_atom_and_gen_l:
  6599. case NVPTX::BI__nvvm_atom_and_gen_ll:
  6600. return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
  6601. case NVPTX::BI__nvvm_atom_or_gen_i:
  6602. case NVPTX::BI__nvvm_atom_or_gen_l:
  6603. case NVPTX::BI__nvvm_atom_or_gen_ll:
  6604. return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
  6605. case NVPTX::BI__nvvm_atom_xor_gen_i:
  6606. case NVPTX::BI__nvvm_atom_xor_gen_l:
  6607. case NVPTX::BI__nvvm_atom_xor_gen_ll:
  6608. return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
  6609. case NVPTX::BI__nvvm_atom_xchg_gen_i:
  6610. case NVPTX::BI__nvvm_atom_xchg_gen_l:
  6611. case NVPTX::BI__nvvm_atom_xchg_gen_ll:
  6612. return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
  6613. case NVPTX::BI__nvvm_atom_max_gen_i:
  6614. case NVPTX::BI__nvvm_atom_max_gen_l:
  6615. case NVPTX::BI__nvvm_atom_max_gen_ll:
  6616. case NVPTX::BI__nvvm_atom_max_gen_ui:
  6617. case NVPTX::BI__nvvm_atom_max_gen_ul:
  6618. case NVPTX::BI__nvvm_atom_max_gen_ull:
  6619. return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
  6620. case NVPTX::BI__nvvm_atom_min_gen_i:
  6621. case NVPTX::BI__nvvm_atom_min_gen_l:
  6622. case NVPTX::BI__nvvm_atom_min_gen_ll:
  6623. case NVPTX::BI__nvvm_atom_min_gen_ui:
  6624. case NVPTX::BI__nvvm_atom_min_gen_ul:
  6625. case NVPTX::BI__nvvm_atom_min_gen_ull:
  6626. return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
  6627. case NVPTX::BI__nvvm_atom_cas_gen_i:
  6628. case NVPTX::BI__nvvm_atom_cas_gen_l:
  6629. case NVPTX::BI__nvvm_atom_cas_gen_ll:
  6630. return MakeAtomicCmpXchgValue(*this, E, true);
  6631. case NVPTX::BI__nvvm_atom_add_gen_f: {
  6632. Value *Ptr = EmitScalarExpr(E->getArg(0));
  6633. Value *Val = EmitScalarExpr(E->getArg(1));
  6634. // atomicrmw only deals with integer arguments so we need to use
  6635. // LLVM's nvvm_atomic_load_add_f32 intrinsic for that.
  6636. Value *FnALAF32 =
  6637. CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType());
  6638. return Builder.CreateCall(FnALAF32, {Ptr, Val});
  6639. }
  6640. default:
  6641. return nullptr;
  6642. }
  6643. }
  6644. #endif // HLSL Change Ends