#ifndef SSE2NEON_H
#define SSE2NEON_H
// This header file provides a simple API translation layer
// between SSE intrinsics and their corresponding Arm/AArch64 NEON versions
//
// This header file does not yet translate all of the SSE intrinsics.
//
// Contributors to this work are:
// John W. Ratcliff <[email protected]>
// Brandon Rowlett <[email protected]>
// Ken Fast <[email protected]>
// Eric van Beurden <[email protected]>
// Alexander Potylitsin <[email protected]>
// Hasindu Gamaarachchi <[email protected]>
// Jim Huang <[email protected]>
// Mark Cheng <[email protected]>
// Malcolm James MacLeod <[email protected]>
// Devin Hussey (easyaspi314) <[email protected]>
// Sebastian Pop <[email protected]>
// Developer Ecosystem Engineering <[email protected]>
// Danila Kutenin <[email protected]>
// François Turban (JishinMaster) <[email protected]>
// Pei-Hsuan Hung <[email protected]>
// Yang-Hao Yuan <[email protected]>
// Syoyo Fujita <[email protected]>
// Brecht Van Lommel <[email protected]>
/*
 * sse2neon is freely redistributable under the MIT License.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Tunable configurations */
/* Enable precise implementation of math operations
 * This would slow down the computation a bit, but gives consistent result with
 * x86 SSE2. (e.g. would solve a hole or NaN pixel in the rendering result)
 */
/* _mm_min_ps and _mm_max_ps */
#ifndef SSE2NEON_PRECISE_MINMAX
#define SSE2NEON_PRECISE_MINMAX (0)
#endif
/* _mm_rcp_ps and _mm_div_ps */
#ifndef SSE2NEON_PRECISE_DIV
#define SSE2NEON_PRECISE_DIV (0)
#endif
/* _mm_sqrt_ps and _mm_rsqrt_ps */
#ifndef SSE2NEON_PRECISE_SQRT
#define SSE2NEON_PRECISE_SQRT (0)
#endif
#ifndef SSE2NEON_PRECISE_RSQRT
#define SSE2NEON_PRECISE_RSQRT (0)
#endif
#if defined(__GNUC__) || defined(__clang__)
#pragma push_macro("FORCE_INLINE")
#pragma push_macro("ALIGN_STRUCT")
#define FORCE_INLINE static inline __attribute__((always_inline))
#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#else
#error "Macro name collisions may happen with unsupported compiler."
#ifdef FORCE_INLINE
#undef FORCE_INLINE
#endif
#define FORCE_INLINE static inline
#ifndef ALIGN_STRUCT
#define ALIGN_STRUCT(x) __declspec(align(x))
#endif
#endif
#ifndef likely
#define likely(x) (x)
#endif
#ifndef unlikely
#define unlikely(x) (x)
#endif
#include <stdint.h>
#include <stdlib.h>
/* Architecture-specific build options */
/* FIXME: #pragma GCC push_options is only available on GCC */
#if defined(__GNUC__)
#if defined(__arm__) && __ARM_ARCH == 7
/* According to ARM C Language Extensions Architecture specification,
 * __ARM_NEON is defined to a value indicating the Advanced SIMD (NEON)
 * architecture supported.
 */
#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
#error "You must enable NEON instructions (e.g. -mfpu=neon) to use SSE2NEON."
#endif
#if !defined(__clang__)
#pragma GCC push_options
#pragma GCC target("fpu=neon")
#endif
#elif defined(__aarch64__)
#if !defined(__clang__)
#pragma GCC push_options
#pragma GCC target("+simd")
#endif
#else
#error "Unsupported target. Must be either ARMv7-A+NEON or ARMv8-A."
#endif
#endif
#include <arm_neon.h>
/* Rounding functions require either AArch64 instructions or libm fallback */
#if !defined(__aarch64__)
#include <math.h>
#endif
/* "__has_builtin" can be used to query support for built-in functions
 * provided by gcc/clang and other compilers that support it.
 */
#ifndef __has_builtin /* GCC prior to 10 or non-clang compilers */
/* Compatibility with gcc <= 9 */
#if __GNUC__ <= 9
#define __has_builtin(x) HAS##x
#define HAS__builtin_popcount 1
#define HAS__builtin_popcountll 1
#else
#define __has_builtin(x) 0
#endif
#endif
/**
 * MACRO for the shuffle parameter of _mm_shuffle_ps().
 * Argument fp3 is a digit[0123] that represents the fp from argument "b"
 * of _mm_shuffle_ps that will be placed in fp3 of the result. fp2 is the same
 * for fp2 in the result. fp1 is a digit[0123] that represents the fp from
 * argument "a" of _mm_shuffle_ps that will be placed in fp1 of the result.
 * fp0 is the same for fp0 of the result.
 */
#define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \
    (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
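/* Illustrative sketch (not part of the original header): how _MM_SHUFFLE is
 * typically consumed. The vectors a, b and the variable r below are
 * hypothetical; only the selector packing is defined by the macro above.
 *
 *   // _MM_SHUFFLE(3, 2, 1, 0) == 0xE4 (bit pattern 11 10 01 00)
 *   // -> dst[0] = a[0], dst[1] = a[1], dst[2] = b[2], dst[3] = b[3]
 *   __m128 r = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
 */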
/* Rounding mode macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04
#define _MM_FROUND_NO_EXC 0x08
#define _MM_ROUND_NEAREST 0x0000
#define _MM_ROUND_DOWN 0x2000
#define _MM_ROUND_UP 0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000
/* indicate immediate constant argument in a given range */
#define __constrange(a, b) const
/* A few intrinsics accept traditional data types like ints or floats, but
 * most operate on data types that are specific to SSE.
 * If a vector type ends in d, it contains doubles, and if it does not have
 * a suffix, it contains floats. An integer vector type can contain any type
 * of integer, from chars to shorts to unsigned long longs.
 */
typedef int64x1_t __m64;
typedef float32x4_t __m128; /* 128-bit vector containing 4 floats */
// On ARM 32-bit architecture, the float64x2_t is not supported.
// The data type __m128d should be represented in a different way for related
// intrinsic conversion.
#if defined(__aarch64__)
typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */
#else
typedef float32x4_t __m128d;
#endif
typedef int64x2_t __m128i; /* 128-bit vector containing integers */
/* type-safe casting between types */
#define vreinterpretq_m128_f16(x) vreinterpretq_f32_f16(x)
#define vreinterpretq_m128_f32(x) (x)
#define vreinterpretq_m128_f64(x) vreinterpretq_f32_f64(x)
#define vreinterpretq_m128_u8(x) vreinterpretq_f32_u8(x)
#define vreinterpretq_m128_u16(x) vreinterpretq_f32_u16(x)
#define vreinterpretq_m128_u32(x) vreinterpretq_f32_u32(x)
#define vreinterpretq_m128_u64(x) vreinterpretq_f32_u64(x)
#define vreinterpretq_m128_s8(x) vreinterpretq_f32_s8(x)
#define vreinterpretq_m128_s16(x) vreinterpretq_f32_s16(x)
#define vreinterpretq_m128_s32(x) vreinterpretq_f32_s32(x)
#define vreinterpretq_m128_s64(x) vreinterpretq_f32_s64(x)
#define vreinterpretq_f16_m128(x) vreinterpretq_f16_f32(x)
#define vreinterpretq_f32_m128(x) (x)
#define vreinterpretq_f64_m128(x) vreinterpretq_f64_f32(x)
#define vreinterpretq_u8_m128(x) vreinterpretq_u8_f32(x)
#define vreinterpretq_u16_m128(x) vreinterpretq_u16_f32(x)
#define vreinterpretq_u32_m128(x) vreinterpretq_u32_f32(x)
#define vreinterpretq_u64_m128(x) vreinterpretq_u64_f32(x)
#define vreinterpretq_s8_m128(x) vreinterpretq_s8_f32(x)
#define vreinterpretq_s16_m128(x) vreinterpretq_s16_f32(x)
#define vreinterpretq_s32_m128(x) vreinterpretq_s32_f32(x)
#define vreinterpretq_s64_m128(x) vreinterpretq_s64_f32(x)
#define vreinterpretq_m128i_s8(x) vreinterpretq_s64_s8(x)
#define vreinterpretq_m128i_s16(x) vreinterpretq_s64_s16(x)
#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x)
#define vreinterpretq_m128i_s64(x) (x)
#define vreinterpretq_m128i_u8(x) vreinterpretq_s64_u8(x)
#define vreinterpretq_m128i_u16(x) vreinterpretq_s64_u16(x)
#define vreinterpretq_m128i_u32(x) vreinterpretq_s64_u32(x)
#define vreinterpretq_m128i_u64(x) vreinterpretq_s64_u64(x)
#define vreinterpretq_f32_m128i(x) vreinterpretq_f32_s64(x)
#define vreinterpretq_f64_m128i(x) vreinterpretq_f64_s64(x)
#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s64(x)
#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s64(x)
#define vreinterpretq_s32_m128i(x) vreinterpretq_s32_s64(x)
#define vreinterpretq_s64_m128i(x) (x)
#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s64(x)
#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s64(x)
#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s64(x)
#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s64(x)
#define vreinterpret_m64_s8(x) vreinterpret_s64_s8(x)
#define vreinterpret_m64_s16(x) vreinterpret_s64_s16(x)
#define vreinterpret_m64_s32(x) vreinterpret_s64_s32(x)
#define vreinterpret_m64_s64(x) (x)
#define vreinterpret_m64_u8(x) vreinterpret_s64_u8(x)
#define vreinterpret_m64_u16(x) vreinterpret_s64_u16(x)
#define vreinterpret_m64_u32(x) vreinterpret_s64_u32(x)
#define vreinterpret_m64_u64(x) vreinterpret_s64_u64(x)
#define vreinterpret_m64_f16(x) vreinterpret_s64_f16(x)
#define vreinterpret_m64_f32(x) vreinterpret_s64_f32(x)
#define vreinterpret_m64_f64(x) vreinterpret_s64_f64(x)
#define vreinterpret_u8_m64(x) vreinterpret_u8_s64(x)
#define vreinterpret_u16_m64(x) vreinterpret_u16_s64(x)
#define vreinterpret_u32_m64(x) vreinterpret_u32_s64(x)
#define vreinterpret_u64_m64(x) vreinterpret_u64_s64(x)
#define vreinterpret_s8_m64(x) vreinterpret_s8_s64(x)
#define vreinterpret_s16_m64(x) vreinterpret_s16_s64(x)
#define vreinterpret_s32_m64(x) vreinterpret_s32_s64(x)
#define vreinterpret_s64_m64(x) (x)
#define vreinterpret_f32_m64(x) vreinterpret_f32_s64(x)
#if defined(__aarch64__)
#define vreinterpretq_m128d_s32(x) vreinterpretq_f64_s32(x)
#define vreinterpretq_m128d_s64(x) vreinterpretq_f64_s64(x)
#define vreinterpretq_m128d_u64(x) vreinterpretq_f64_u64(x)
#define vreinterpretq_m128d_f32(x) vreinterpretq_f64_f32(x)
#define vreinterpretq_m128d_f64(x) (x)
#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f64(x)
#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f64(x)
#define vreinterpretq_f64_m128d(x) (x)
#define vreinterpretq_f32_m128d(x) vreinterpretq_f32_f64(x)
#else
#define vreinterpretq_m128d_s32(x) vreinterpretq_f32_s32(x)
#define vreinterpretq_m128d_s64(x) vreinterpretq_f32_s64(x)
#define vreinterpretq_m128d_u32(x) vreinterpretq_f32_u32(x)
#define vreinterpretq_m128d_u64(x) vreinterpretq_f32_u64(x)
#define vreinterpretq_m128d_f32(x) (x)
#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f32(x)
#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f32(x)
#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f32(x)
#define vreinterpretq_f32_m128d(x) (x)
#endif
// A struct is defined in this header file called 'SIMDVec' which can be used
// by applications which attempt to access the contents of an __m128 struct
// directly. It is important to note that accessing the __m128 struct directly
// is considered bad coding practice by Microsoft: @see:
// https://msdn.microsoft.com/en-us/library/ayeb3ayc.aspx
//
// However, some legacy source code may try to access the contents of an __m128
// struct directly so the developer can use the SIMDVec as an alias for it. Any
// casting must be done manually by the developer, as you cannot cast or
// otherwise alias the base NEON data type for intrinsic operations.
//
// union intended to allow direct access to an __m128 variable using the names
// that the MSVC compiler provides. This union should really only be used when
// trying to access the members of the vector as integer values. GCC/clang
// allow native access to the float members through a simple array access
// operator (in C since 4.6, in C++ since 4.8).
//
// Ideally direct accesses to SIMD vectors should not be used since it can cause
// a performance hit. If it really is needed however, the original __m128
// variable can be aliased with a pointer to this union and used to access
// individual components. The use of this union should be hidden behind a macro
// that is used throughout the codebase to access the members instead of always
// declaring this type of variable.
typedef union ALIGN_STRUCT(16) SIMDVec {
    float m128_f32[4];     // as floats - DON'T USE. Added for convenience.
    int8_t m128_i8[16];    // as signed 8-bit integers.
    int16_t m128_i16[8];   // as signed 16-bit integers.
    int32_t m128_i32[4];   // as signed 32-bit integers.
    int64_t m128_i64[2];   // as signed 64-bit integers.
    uint8_t m128_u8[16];   // as unsigned 8-bit integers.
    uint16_t m128_u16[8];  // as unsigned 16-bit integers.
    uint32_t m128_u32[4];  // as unsigned 32-bit integers.
    uint64_t m128_u64[2];  // as unsigned 64-bit integers.
} SIMDVec;
// casting using SIMDVec
#define vreinterpretq_nth_u64_m128i(x, n) (((SIMDVec *) &x)->m128_u64[n])
#define vreinterpretq_nth_u32_m128i(x, n) (((SIMDVec *) &x)->m128_u32[n])
#define vreinterpretq_nth_u8_m128i(x, n) (((SIMDVec *) &x)->m128_u8[n])
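/* Illustrative sketch (not part of the original header): one way the
 * vreinterpretq_nth_* helpers above might be used to inspect a lane of an
 * __m128i. The variable names below are hypothetical.
 *
 *   __m128i v = _mm_set1_epi8(0x7f);
 *   uint32_t lane0 = vreinterpretq_nth_u32_m128i(v, 0);  // 0x7f7f7f7f
 */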
/* Backwards compatibility for compilers with lack of specific type support */
// Older gcc does not define vld1q_u8_x4 type
#if defined(__GNUC__) && !defined(__clang__) &&                        \
    ((__GNUC__ == 10 && (__GNUC_MINOR__ <= 1)) ||                      \
     (__GNUC__ == 9 && (__GNUC_MINOR__ <= 3)) ||                       \
     (__GNUC__ == 8 && (__GNUC_MINOR__ <= 4)) || __GNUC__ <= 7)
FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
{
    uint8x16x4_t ret;
    ret.val[0] = vld1q_u8(p + 0);
    ret.val[1] = vld1q_u8(p + 16);
    ret.val[2] = vld1q_u8(p + 32);
    ret.val[3] = vld1q_u8(p + 48);
    return ret;
}
#else
// Wraps vld1q_u8_x4
FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
{
    return vld1q_u8_x4(p);
}
#endif
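/* Illustrative sketch (not part of the original header): either branch of the
 * wrapper above loads 64 contiguous bytes into four q registers. The buffer
 * below is hypothetical.
 *
 *   uint8_t buf[64] = {0};
 *   uint8x16x4_t four_lanes = _sse2neon_vld1q_u8_x4(buf);
 */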
/* Function Naming Conventions
 * The naming convention of SSE intrinsics is straightforward. A generic SSE
 * intrinsic function is given as follows:
 *   _mm_<name>_<data_type>
 *
 * The parts of this format are given as follows:
 * 1. <name> describes the operation performed by the intrinsic
 * 2. <data_type> identifies the data type of the function's primary arguments
 *
 * This last part, <data_type>, is a little complicated. It identifies the
 * content of the input values, and can be set to any of the following values:
 * + ps - vectors contain floats (ps stands for packed single-precision)
 * + pd - vectors contain doubles (pd stands for packed double-precision)
 * + epi8/epi16/epi32/epi64 - vectors contain 8-bit/16-bit/32-bit/64-bit
 *                            signed integers
 * + epu8/epu16/epu32/epu64 - vectors contain 8-bit/16-bit/32-bit/64-bit
 *                            unsigned integers
 * + si128 - unspecified 128-bit vector or 256-bit vector
 * + m128/m128i/m128d - identifies input vector types when they are different
 *                      than the type of the returned vector
 *
 * For example, _mm_setzero_ps. The _mm implies that the function returns
 * a 128-bit vector. The _ps at the end implies that the argument vectors
 * contain floats.
 *
 * A complete example: Byte Shuffle - pshufb (_mm_shuffle_epi8)
 *   // Set packed 16-bit integers. 128 bits, 8 short, per 16 bits
 *   __m128i v_in = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
 *   // Set packed 8-bit integers
 *   // 128 bits, 16 chars, per 8 bits
 *   __m128i v_perm = _mm_setr_epi8(1, 0, 2, 3, 8, 9, 10, 11,
 *                                  4, 5, 12, 13, 6, 7, 14, 15);
 *   // Shuffle packed 8-bit integers
 *   __m128i v_out = _mm_shuffle_epi8(v_in, v_perm); // pshufb
 *
 * Data (Number, Binary, Byte Index):
    +------+------+-------------+------+------+-------------+
    |      1      |      2      |      3      |      4      | Number
    +------+------+------+------+------+------+------+------+
    | 0000 | 0001 | 0000 | 0010 | 0000 | 0011 | 0000 | 0100 | Binary
    +------+------+------+------+------+------+------+------+
    |  0   |  1   |  2   |  3   |  4   |  5   |  6   |  7   | Index
    +------+------+------+------+------+------+------+------+
    +------+------+------+------+------+------+------+------+
    |      5      |      6      |      7      |      8      | Number
    +------+------+------+------+------+------+------+------+
    | 0000 | 0101 | 0000 | 0110 | 0000 | 0111 | 0000 | 1000 | Binary
    +------+------+------+------+------+------+------+------+
    |  8   |  9   |  10  |  11  |  12  |  13  |  14  |  15  | Index
    +------+------+------+------+------+------+------+------+
 * Index (Byte Index):
    +------+------+------+------+------+------+------+------+
    |  1   |  0   |  2   |  3   |  8   |  9   |  10  |  11  |
    +------+------+------+------+------+------+------+------+
    +------+------+------+------+------+------+------+------+
    |  4   |  5   |  12  |  13  |  6   |  7   |  14  |  15  |
    +------+------+------+------+------+------+------+------+
 * Result:
    +------+------+------+------+------+------+------+------+
    |  1   |  0   |  2   |  3   |  8   |  9   |  10  |  11  | Index
    +------+------+------+------+------+------+------+------+
    | 0001 | 0000 | 0000 | 0010 | 0000 | 0101 | 0000 | 0110 | Binary
    +------+------+------+------+------+------+------+------+
    |     256     |      2      |      5      |      6      | Number
    +------+------+------+------+------+------+------+------+
    +------+------+------+------+------+------+------+------+
    |  4   |  5   |  12  |  13  |  6   |  7   |  14  |  15  | Index
    +------+------+------+------+------+------+------+------+
    | 0000 | 0011 | 0000 | 0111 | 0000 | 0100 | 0000 | 1000 | Binary
    +------+------+------+------+------+------+------+------+
    |      3      |      7      |      4      |      8      | Number
    +------+------+------+------+------+------+-------------+
 */
/* Set/get methods */
/* Constants for use with _mm_prefetch. */
enum _mm_hint {
    _MM_HINT_NTA = 0,  /* load data to L1 and L2 cache, mark it as NTA */
    _MM_HINT_T0 = 1,   /* load data to L1 and L2 cache */
    _MM_HINT_T1 = 2,   /* load data to L2 cache only */
    _MM_HINT_T2 = 3,   /* load data to L2 cache only, mark it as NTA */
    _MM_HINT_ENTA = 4, /* exclusive version of _MM_HINT_NTA */
    _MM_HINT_ET0 = 5,  /* exclusive version of _MM_HINT_T0 */
    _MM_HINT_ET1 = 6,  /* exclusive version of _MM_HINT_T1 */
    _MM_HINT_ET2 = 7   /* exclusive version of _MM_HINT_T2 */
};
// Loads one cache line of data from address p to a location closer to the
// processor. https://msdn.microsoft.com/en-us/library/84szxsww(v=vs.100).aspx
FORCE_INLINE void _mm_prefetch(const void *p, int i)
{
    (void) i;
    __builtin_prefetch(p);
}
// Pause the processor. This is typically used in spin-wait loops and depending
// on the x86 processor typical values are in the 40-100 cycle range. The
// 'yield' instruction isn't a good fit because it's effectively a nop on most
// Arm cores. Experience with several databases has shown that an 'isb' is
// a reasonable approximation.
FORCE_INLINE void _mm_pause()
{
    __asm__ __volatile__("isb\n");
}
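/* Illustrative sketch (not part of the original header): a typical spin-wait
 * loop pairing _mm_pause with _mm_prefetch. The 'ready' flag, 'shared_buffer'
 * pointer, and <stdatomic.h> usage are hypothetical; only the two intrinsics
 * above come from this header.
 *
 *   while (!atomic_load_explicit(&ready, memory_order_acquire))
 *       _mm_pause();                               // back off; emits 'isb' on Arm
 *   _mm_prefetch(shared_buffer, _MM_HINT_T0);      // warm the cache before use
 */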
// Copy the lower single-precision (32-bit) floating-point element of a to dst.
//
//   dst[31:0] := a[31:0]
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_f32
FORCE_INLINE float _mm_cvtss_f32(__m128 a)
{
    return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
}
// Convert the lower single-precision (32-bit) floating-point element in b to a
// double-precision (64-bit) floating-point element, store the result in the
// lower element of dst, and copy the upper element from a to the upper element
// of dst.
//
//   dst[63:0] := Convert_FP32_To_FP64(b[31:0])
//   dst[127:64] := a[127:64]
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_sd
FORCE_INLINE __m128d _mm_cvtss_sd(__m128d a, __m128 b)
{
    double d = (double) vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
#if defined(__aarch64__)
    return vreinterpretq_m128d_f64(
        vsetq_lane_f64(d, vreinterpretq_f64_m128d(a), 0));
#else
    return vreinterpretq_m128d_s64(
        vsetq_lane_s64(*(int64_t *) &d, vreinterpretq_s64_m128d(a), 0));
#endif
}
// Convert the lower single-precision (32-bit) floating-point element in a to a
// 32-bit integer, and store the result in dst.
//
//   dst[31:0] := Convert_FP32_To_Int32(a[31:0])
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si32
#define _mm_cvtss_si32(a) _mm_cvt_ss2si(a)
// Convert the lower single-precision (32-bit) floating-point element in a to a
// 64-bit integer, and store the result in dst.
//
//   dst[63:0] := Convert_FP32_To_Int64(a[31:0])
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si64
FORCE_INLINE int64_t _mm_cvtss_si64(__m128 a)
{
#if defined(__aarch64__)
    return vgetq_lane_s64(
        vreinterpretq_s64_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a))), 0);
#else
    /* Fallback: emulate round-to-nearest with ties to even, the default x86
     * rounding mode. */
    float32_t data = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
    float32_t diff = data - floor(data);
    if (diff > 0.5)
        return (int64_t) ceil(data);
    if (unlikely(diff == 0.5)) {
        int64_t f = (int64_t) floor(data);
        int64_t c = (int64_t) ceil(data);
        return c & 1 ? f : c;
    }
    return (int64_t) floor(data);
#endif
}
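/* Illustrative sketch (not part of the original header): a few concrete values
 * showing the ties-to-even behaviour implemented above.
 *
 *   _mm_cvtss_si64(_mm_set_ss(2.5f));  // -> 2 (tie rounds to even)
 *   _mm_cvtss_si64(_mm_set_ss(3.5f));  // -> 4 (tie rounds to even)
 *   _mm_cvtss_si64(_mm_set_ss(2.6f));  // -> 3
 */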
// Convert packed single-precision (32-bit) floating-point elements in a to
// packed 32-bit integers with truncation, and store the results in dst.
//
//   FOR j := 0 to 1
//      i := 32*j
//      dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
//   ENDFOR
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_ps2pi
FORCE_INLINE __m64 _mm_cvtt_ps2pi(__m128 a)
{
    return vreinterpret_m64_s32(
        vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a))));
}
// Convert the lower single-precision (32-bit) floating-point element in a to a
// 32-bit integer with truncation, and store the result in dst.
//
//   dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_ss2si
FORCE_INLINE int _mm_cvtt_ss2si(__m128 a)
{
    return vgetq_lane_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)), 0);
}
// Convert packed single-precision (32-bit) floating-point elements in a to
// packed 32-bit integers with truncation, and store the results in dst.
//
//   FOR j := 0 to 1
//      i := 32*j
//      dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
//   ENDFOR
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttps_pi32
#define _mm_cvttps_pi32(a) _mm_cvtt_ps2pi(a)
// Convert the lower single-precision (32-bit) floating-point element in a to a
// 32-bit integer with truncation, and store the result in dst.
//
//   dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si32
#define _mm_cvttss_si32(a) _mm_cvtt_ss2si(a)
// Convert the lower single-precision (32-bit) floating-point element in a to a
// 64-bit integer with truncation, and store the result in dst.
//
//   dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0])
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si64
FORCE_INLINE int64_t _mm_cvttss_si64(__m128 a)
{
    return vgetq_lane_s64(
        vmovl_s32(vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)))), 0);
}
// Sets the 128-bit value to zero
// https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx
FORCE_INLINE __m128i _mm_setzero_si128(void)
{
    return vreinterpretq_m128i_s32(vdupq_n_s32(0));
}
// Clears the four single-precision, floating-point values.
// https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setzero_ps(void)
{
    return vreinterpretq_m128_f32(vdupq_n_f32(0));
}
// Return vector of type __m128d with all elements set to zero.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_pd
FORCE_INLINE __m128d _mm_setzero_pd(void)
{
#if defined(__aarch64__)
    return vreinterpretq_m128d_f64(vdupq_n_f64(0));
#else
    return vreinterpretq_m128d_f32(vdupq_n_f32(0));
#endif
}
// Sets the four single-precision, floating-point values to w.
//
//   r0 := r1 := r2 := r3 := w
//
// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set1_ps(float _w)
{
    return vreinterpretq_m128_f32(vdupq_n_f32(_w));
}
// Sets the four single-precision, floating-point values to w.
// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps1(float _w)
{
    return vreinterpretq_m128_f32(vdupq_n_f32(_w));
}
// Sets the four single-precision, floating-point values to the four inputs.
// https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
{
    float ALIGN_STRUCT(16) data[4] = {x, y, z, w};
    return vreinterpretq_m128_f32(vld1q_f32(data));
}
// Copy single-precision (32-bit) floating-point element a to the lower element
// of dst, and zero the upper 3 elements.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ss
FORCE_INLINE __m128 _mm_set_ss(float a)
{
    float ALIGN_STRUCT(16) data[4] = {a, 0, 0, 0};
    return vreinterpretq_m128_f32(vld1q_f32(data));
}
// Sets the four single-precision, floating-point values to the four inputs in
// reverse order.
// https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
{
    float ALIGN_STRUCT(16) data[4] = {w, z, y, x};
    return vreinterpretq_m128_f32(vld1q_f32(data));
}
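/* Illustrative sketch (not part of the original header): _mm_set_ps takes its
 * arguments from the highest lane down, while _mm_setr_ps takes them in memory
 * (reverse) order, so the two calls below produce the same vector. The
 * variable names are hypothetical.
 *
 *   __m128 hi_first = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   // lanes: 1,2,3,4
 *   __m128 lo_first = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);  // lanes: 1,2,3,4
 */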
// Sets the 8 signed 16-bit integer values in reverse order.
//
// Return Value
//   r0 := w0
//   r1 := w1
//   ...
//   r7 := w7
FORCE_INLINE __m128i _mm_setr_epi16(short w0,
                                    short w1,
                                    short w2,
                                    short w3,
                                    short w4,
                                    short w5,
                                    short w6,
                                    short w7)
{
    int16_t ALIGN_STRUCT(16) data[8] = {w0, w1, w2, w3, w4, w5, w6, w7};
    return vreinterpretq_m128i_s16(vld1q_s16((int16_t *) data));
}
// Sets the 4 signed 32-bit integer values in reverse order
// https://technet.microsoft.com/en-us/library/security/27yb3ee5(v=vs.90).aspx
FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0)
{
    int32_t ALIGN_STRUCT(16) data[4] = {i3, i2, i1, i0};
    return vreinterpretq_m128i_s32(vld1q_s32(data));
}
// Set packed 64-bit integers in dst with the supplied values in reverse order.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_epi64
FORCE_INLINE __m128i _mm_setr_epi64(__m64 e1, __m64 e0)
{
    return vreinterpretq_m128i_s64(vcombine_s64(e1, e0));
}
// Sets the 16 signed 8-bit integer values to b.
//
//   r0 := b
//   r1 := b
//   ...
//   r15 := b
//
// https://msdn.microsoft.com/en-us/library/6e14xhyf(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set1_epi8(signed char w)
{
    return vreinterpretq_m128i_s8(vdupq_n_s8(w));
}
// Broadcast double-precision (64-bit) floating-point value a to all elements of
// dst.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pd
FORCE_INLINE __m128d _mm_set1_pd(double d)
{
#if defined(__aarch64__)
    return vreinterpretq_m128d_f64(vdupq_n_f64(d));
#else
    return vreinterpretq_m128d_s64(vdupq_n_s64(*(int64_t *) &d));
#endif
}
// Sets the 8 signed 16-bit integer values to w.
//
//   r0 := w
//   r1 := w
//   ...
//   r7 := w
//
// https://msdn.microsoft.com/en-us/library/k0ya3x0e(v=vs.90).aspx
FORCE_INLINE __m128i _mm_set1_epi16(short w)
{
    return vreinterpretq_m128i_s16(vdupq_n_s16(w));
}
// Sets the 16 signed 8-bit integer values.
// https://msdn.microsoft.com/en-us/library/x0cx8zd3(v=vs.90).aspx
FORCE_INLINE __m128i _mm_set_epi8(signed char b15,
                                  signed char b14,
                                  signed char b13,
                                  signed char b12,
                                  signed char b11,
                                  signed char b10,
                                  signed char b9,
                                  signed char b8,
                                  signed char b7,
                                  signed char b6,
                                  signed char b5,
                                  signed char b4,
                                  signed char b3,
                                  signed char b2,
                                  signed char b1,
                                  signed char b0)
{
    int8_t ALIGN_STRUCT(16)
        data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
                    (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
                    (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
                    (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
    return (__m128i) vld1q_s8(data);
}
// Sets the 8 signed 16-bit integer values.
// https://msdn.microsoft.com/en-au/library/3e0fek84(v=vs.90).aspx
FORCE_INLINE __m128i _mm_set_epi16(short i7,
                                   short i6,
                                   short i5,
                                   short i4,
                                   short i3,
                                   short i2,
                                   short i1,
                                   short i0)
{
    int16_t ALIGN_STRUCT(16) data[8] = {i0, i1, i2, i3, i4, i5, i6, i7};
    return vreinterpretq_m128i_s16(vld1q_s16(data));
}
// Sets the 16 signed 8-bit integer values in reverse order.
// https://msdn.microsoft.com/en-us/library/2khb9c7k(v=vs.90).aspx
FORCE_INLINE __m128i _mm_setr_epi8(signed char b0,
                                   signed char b1,
                                   signed char b2,
                                   signed char b3,
                                   signed char b4,
                                   signed char b5,
                                   signed char b6,
                                   signed char b7,
                                   signed char b8,
                                   signed char b9,
                                   signed char b10,
                                   signed char b11,
                                   signed char b12,
                                   signed char b13,
                                   signed char b14,
                                   signed char b15)
{
    int8_t ALIGN_STRUCT(16)
        data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
                    (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
                    (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
                    (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
    return (__m128i) vld1q_s8(data);
}
// Sets the 4 signed 32-bit integer values to i.
//
  729. // r0 := i
  730. // r1 := i
  731. // r2 := i
// r3 := i
  733. //
  734. // https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx
  735. FORCE_INLINE __m128i _mm_set1_epi32(int _i)
  736. {
  737. return vreinterpretq_m128i_s32(vdupq_n_s32(_i));
  738. }
  739. // Sets the 2 signed 64-bit integer values to i.
  740. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/whtfzhzk(v=vs.100)
  741. FORCE_INLINE __m128i _mm_set1_epi64(__m64 _i)
  742. {
  743. return vreinterpretq_m128i_s64(vdupq_n_s64((int64_t) _i));
  744. }
  745. // Sets the 2 signed 64-bit integer values to i.
  746. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_epi64x
  747. FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i)
  748. {
  749. return vreinterpretq_m128i_s64(vdupq_n_s64(_i));
  750. }
  751. // Sets the 4 signed 32-bit integer values.
  752. // https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx
  753. FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
  754. {
  755. int32_t ALIGN_STRUCT(16) data[4] = {i0, i1, i2, i3};
  756. return vreinterpretq_m128i_s32(vld1q_s32(data));
  757. }
  758. // Returns the __m128i structure with its two 64-bit integer values
  759. // initialized to the values of the two 64-bit integers passed in.
  760. // https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx
  761. FORCE_INLINE __m128i _mm_set_epi64x(int64_t i1, int64_t i2)
  762. {
  763. return vreinterpretq_m128i_s64(
  764. vcombine_s64(vcreate_s64(i2), vcreate_s64(i1)));
  765. }
  766. // Returns the __m128i structure with its two 64-bit integer values
  767. // initialized to the values of the two 64-bit integers passed in.
  768. // https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx
  769. FORCE_INLINE __m128i _mm_set_epi64(__m64 i1, __m64 i2)
  770. {
  771. return _mm_set_epi64x((int64_t) i1, (int64_t) i2);
  772. }
  773. // Set packed double-precision (64-bit) floating-point elements in dst with the
  774. // supplied values.
  775. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd
  776. FORCE_INLINE __m128d _mm_set_pd(double e1, double e0)
  777. {
  778. double ALIGN_STRUCT(16) data[2] = {e0, e1};
  779. #if defined(__aarch64__)
  780. return vreinterpretq_m128d_f64(vld1q_f64((float64_t *) data));
  781. #else
  782. return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) data));
  783. #endif
  784. }
  785. // Set packed double-precision (64-bit) floating-point elements in dst with the
  786. // supplied values in reverse order.
  787. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_pd
  788. FORCE_INLINE __m128d _mm_setr_pd(double e1, double e0)
  789. {
  790. return _mm_set_pd(e0, e1);
  791. }
  792. // Copy double-precision (64-bit) floating-point element a to the lower element
  793. // of dst, and zero the upper element.
  794. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_sd
  795. FORCE_INLINE __m128d _mm_set_sd(double a)
  796. {
  797. return _mm_set_pd(0, a);
  798. }
  799. // Broadcast double-precision (64-bit) floating-point value a to all elements of
  800. // dst.
  801. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd1
  802. #define _mm_set_pd1 _mm_set1_pd
  803. // Stores four single-precision, floating-point values.
  804. // https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx
  805. FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
  806. {
  807. vst1q_f32(p, vreinterpretq_f32_m128(a));
  808. }
  809. // Store the lower single-precision (32-bit) floating-point element from a into
  810. // 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
  811. // boundary or a general-protection exception may be generated.
  812. //
  813. // MEM[mem_addr+31:mem_addr] := a[31:0]
  814. // MEM[mem_addr+63:mem_addr+32] := a[31:0]
  815. // MEM[mem_addr+95:mem_addr+64] := a[31:0]
  816. // MEM[mem_addr+127:mem_addr+96] := a[31:0]
  817. //
  818. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_ps1
  819. FORCE_INLINE void _mm_store_ps1(float *p, __m128 a)
  820. {
  821. float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  822. vst1q_f32(p, vdupq_n_f32(a0));
  823. }
  824. // Store the lower single-precision (32-bit) floating-point element from a into
  825. // 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
  826. // boundary or a general-protection exception may be generated.
  827. //
  828. // MEM[mem_addr+31:mem_addr] := a[31:0]
  829. // MEM[mem_addr+63:mem_addr+32] := a[31:0]
  830. // MEM[mem_addr+95:mem_addr+64] := a[31:0]
  831. // MEM[mem_addr+127:mem_addr+96] := a[31:0]
  832. //
  833. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store1_ps
  834. #define _mm_store1_ps _mm_store_ps1
  835. // Store 4 single-precision (32-bit) floating-point elements from a into memory
  836. // in reverse order. mem_addr must be aligned on a 16-byte boundary or a
  837. // general-protection exception may be generated.
  838. //
  839. // MEM[mem_addr+31:mem_addr] := a[127:96]
  840. // MEM[mem_addr+63:mem_addr+32] := a[95:64]
  841. // MEM[mem_addr+95:mem_addr+64] := a[63:32]
  842. // MEM[mem_addr+127:mem_addr+96] := a[31:0]
  843. //
  844. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_ps
  845. FORCE_INLINE void _mm_storer_ps(float *p, __m128 a)
  846. {
  847. float32x4_t tmp = vrev64q_f32(vreinterpretq_f32_m128(a));
  848. float32x4_t rev = vextq_f32(tmp, tmp, 2);
  849. vst1q_f32(p, rev);
  850. }
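// Usage sketch (illustrative comment, not part of the original header): for a
// vector built with _mm_setr_ps(1, 2, 3, 4), _mm_store_ps1 replicates the
// lowest lane while _mm_storer_ps writes the lanes in reverse; `out` is a
// hypothetical 16-byte aligned buffer.
//
//   float ALIGN_STRUCT(16) out[4];
//   __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
//   _mm_store_ps1(out, v); // out = {1, 1, 1, 1}
//   _mm_storer_ps(out, v); // out = {4, 3, 2, 1}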
  851. // Stores four single-precision, floating-point values.
  852. // https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx
  853. FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
  854. {
  855. vst1q_f32(p, vreinterpretq_f32_m128(a));
  856. }
// Stores four 32-bit integer values (as a __m128i value) at the address p.
  858. // https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
  859. FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
  860. {
  861. vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
  862. }
// Stores four 32-bit integer values (as a __m128i value) at the address p.
  864. // https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
  865. FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
  866. {
  867. vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
  868. }
// Stores the lower single-precision, floating-point value.
  870. // https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx
  871. FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
  872. {
  873. vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0);
  874. }
  875. // Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
  876. // elements) from a into memory. mem_addr must be aligned on a 16-byte boundary
  877. // or a general-protection exception may be generated.
  878. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd
  879. FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a)
  880. {
  881. #if defined(__aarch64__)
  882. vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(a));
  883. #else
  884. vst1q_f32((float32_t *) mem_addr, vreinterpretq_f32_m128d(a));
  885. #endif
  886. }
  887. // Store the upper double-precision (64-bit) floating-point element from a into
  888. // memory.
  889. //
  890. // MEM[mem_addr+63:mem_addr] := a[127:64]
  891. //
  892. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeh_pd
  893. FORCE_INLINE void _mm_storeh_pd(double *mem_addr, __m128d a)
  894. {
  895. #if defined(__aarch64__)
  896. vst1_f64((float64_t *) mem_addr, vget_high_f64(vreinterpretq_f64_m128d(a)));
  897. #else
  898. vst1_f32((float32_t *) mem_addr, vget_high_f32(vreinterpretq_f32_m128d(a)));
  899. #endif
  900. }
  901. // Store the lower double-precision (64-bit) floating-point element from a into
  902. // memory.
  903. //
  904. // MEM[mem_addr+63:mem_addr] := a[63:0]
  905. //
  906. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storel_pd
  907. FORCE_INLINE void _mm_storel_pd(double *mem_addr, __m128d a)
  908. {
  909. #if defined(__aarch64__)
  910. vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
  911. #else
  912. vst1_f32((float32_t *) mem_addr, vget_low_f32(vreinterpretq_f32_m128d(a)));
  913. #endif
  914. }
  915. // Store 2 double-precision (64-bit) floating-point elements from a into memory
  916. // in reverse order. mem_addr must be aligned on a 16-byte boundary or a
  917. // general-protection exception may be generated.
  918. //
  919. // MEM[mem_addr+63:mem_addr] := a[127:64]
  920. // MEM[mem_addr+127:mem_addr+64] := a[63:0]
  921. //
  922. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_pd
  923. FORCE_INLINE void _mm_storer_pd(double *mem_addr, __m128d a)
  924. {
  925. float32x4_t f = vreinterpretq_f32_m128d(a);
  926. _mm_store_pd(mem_addr, vreinterpretq_m128d_f32(vextq_f32(f, f, 2)));
  927. }
  928. // Store the lower double-precision (64-bit) floating-point element from a into
  929. // 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
  930. // boundary or a general-protection exception may be generated.
  931. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd1
  932. FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a)
  933. {
  934. #if defined(__aarch64__)
  935. float64x1_t a_low = vget_low_f64(vreinterpretq_f64_m128d(a));
  936. vst1q_f64((float64_t *) mem_addr,
  937. vreinterpretq_f64_m128d(vcombine_f64(a_low, a_low)));
  938. #else
  939. float32x2_t a_low = vget_low_f32(vreinterpretq_f32_m128d(a));
  940. vst1q_f32((float32_t *) mem_addr,
  941. vreinterpretq_f32_m128d(vcombine_f32(a_low, a_low)));
  942. #endif
  943. }
  944. // Store the lower double-precision (64-bit) floating-point element from a into
  945. // 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
  946. // boundary or a general-protection exception may be generated.
  947. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=9,526,5601&text=_mm_store1_pd
  948. #define _mm_store1_pd _mm_store_pd1
  949. // Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
  950. // elements) from a into memory. mem_addr does not need to be aligned on any
  951. // particular boundary.
  952. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_pd
  953. FORCE_INLINE void _mm_storeu_pd(double *mem_addr, __m128d a)
  954. {
  955. _mm_store_pd(mem_addr, a);
  956. }
  957. // Reads the lower 64 bits of b and stores them into the lower 64 bits of a.
  958. // https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx
  959. FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b)
  960. {
  961. uint64x1_t hi = vget_high_u64(vreinterpretq_u64_m128i(*a));
  962. uint64x1_t lo = vget_low_u64(vreinterpretq_u64_m128i(b));
  963. *a = vreinterpretq_m128i_u64(vcombine_u64(lo, hi));
  964. }
  965. // Stores the lower two single-precision floating point values of a to the
  966. // address p.
  967. //
  968. // *p0 := a0
  969. // *p1 := a1
  970. //
  971. // https://msdn.microsoft.com/en-us/library/h54t98ks(v=vs.90).aspx
  972. FORCE_INLINE void _mm_storel_pi(__m64 *p, __m128 a)
  973. {
*p = vreinterpret_m64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
  975. }
  976. // Stores the upper two single-precision, floating-point values of a to the
  977. // address p.
  978. //
  979. // *p0 := a2
  980. // *p1 := a3
  981. //
  982. // https://msdn.microsoft.com/en-us/library/a7525fs8(v%3dvs.90).aspx
  983. FORCE_INLINE void _mm_storeh_pi(__m64 *p, __m128 a)
  984. {
*p = vreinterpret_m64_f32(vget_high_f32(vreinterpretq_f32_m128(a)));
  986. }
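// Usage sketch (illustrative comment, not part of the original header):
// _mm_storel_pi and _mm_storeh_pi together split a vector into two float
// pairs; the array name `halves` is hypothetical.
//
//   float halves[4];
//   __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
//   _mm_storel_pi((__m64 *) &halves[0], v); // halves[0..1] = {1, 2}
//   _mm_storeh_pi((__m64 *) &halves[2], v); // halves[2..3] = {3, 4}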
// Loads a single-precision, floating-point value, copying it into all
// four words
  989. // https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx
  990. FORCE_INLINE __m128 _mm_load1_ps(const float *p)
  991. {
  992. return vreinterpretq_m128_f32(vld1q_dup_f32(p));
  993. }
  994. // Load a single-precision (32-bit) floating-point element from memory into all
  995. // elements of dst.
  996. //
  997. // dst[31:0] := MEM[mem_addr+31:mem_addr]
  998. // dst[63:32] := MEM[mem_addr+31:mem_addr]
  999. // dst[95:64] := MEM[mem_addr+31:mem_addr]
  1000. // dst[127:96] := MEM[mem_addr+31:mem_addr]
  1001. //
  1002. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_ps1
  1003. #define _mm_load_ps1 _mm_load1_ps
  1004. // Sets the lower two single-precision, floating-point values with 64
  1005. // bits of data loaded from the address p; the upper two values are passed
  1006. // through from a.
  1007. //
  1008. // Return Value
  1009. // r0 := *p0
  1010. // r1 := *p1
  1011. // r2 := a2
  1012. // r3 := a3
  1013. //
  1014. // https://msdn.microsoft.com/en-us/library/s57cyak2(v=vs.100).aspx
  1015. FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *p)
  1016. {
  1017. return vreinterpretq_m128_f32(
  1018. vcombine_f32(vld1_f32((const float32_t *) p), vget_high_f32(a)));
  1019. }
  1020. // Load 4 single-precision (32-bit) floating-point elements from memory into dst
  1021. // in reverse order. mem_addr must be aligned on a 16-byte boundary or a
  1022. // general-protection exception may be generated.
  1023. //
  1024. // dst[31:0] := MEM[mem_addr+127:mem_addr+96]
  1025. // dst[63:32] := MEM[mem_addr+95:mem_addr+64]
  1026. // dst[95:64] := MEM[mem_addr+63:mem_addr+32]
  1027. // dst[127:96] := MEM[mem_addr+31:mem_addr]
  1028. //
  1029. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_ps
  1030. FORCE_INLINE __m128 _mm_loadr_ps(const float *p)
  1031. {
  1032. float32x4_t v = vrev64q_f32(vld1q_f32(p));
  1033. return vreinterpretq_m128_f32(vextq_f32(v, v, 2));
  1034. }
  1035. // Sets the upper two single-precision, floating-point values with 64
  1036. // bits of data loaded from the address p; the lower two values are passed
  1037. // through from a.
  1038. //
  1039. // r0 := a0
  1040. // r1 := a1
  1041. // r2 := *p0
  1042. // r3 := *p1
  1043. //
  1044. // https://msdn.microsoft.com/en-us/library/w92wta0x(v%3dvs.100).aspx
  1045. FORCE_INLINE __m128 _mm_loadh_pi(__m128 a, __m64 const *p)
  1046. {
  1047. return vreinterpretq_m128_f32(
  1048. vcombine_f32(vget_low_f32(a), vld1_f32((const float32_t *) p)));
  1049. }
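// Usage sketch (illustrative comment, not part of the original header):
// _mm_loadl_pi and _mm_loadh_pi assemble one vector from two float pairs; the
// array names `lo` and `hi` are hypothetical.
//
//   float lo[2] = {1.0f, 2.0f}, hi[2] = {3.0f, 4.0f};
//   __m128 v = _mm_setzero_ps();
//   v = _mm_loadl_pi(v, (const __m64 *) lo); // v = {1, 2, 0, 0}
//   v = _mm_loadh_pi(v, (const __m64 *) hi); // v = {1, 2, 3, 4}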
  1050. // Loads four single-precision, floating-point values.
  1051. // https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx
  1052. FORCE_INLINE __m128 _mm_load_ps(const float *p)
  1053. {
  1054. return vreinterpretq_m128_f32(vld1q_f32(p));
  1055. }
  1056. // Loads four single-precision, floating-point values.
  1057. // https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx
  1058. FORCE_INLINE __m128 _mm_loadu_ps(const float *p)
  1059. {
  1060. // for neon, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are
  1061. // equivalent for neon
  1062. return vreinterpretq_m128_f32(vld1q_f32(p));
  1063. }
  1064. // Load unaligned 16-bit integer from memory into the first element of dst.
  1065. //
  1066. // dst[15:0] := MEM[mem_addr+15:mem_addr]
  1067. // dst[MAX:16] := 0
  1068. //
  1069. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si16
  1070. FORCE_INLINE __m128i _mm_loadu_si16(const void *p)
  1071. {
  1072. return vreinterpretq_m128i_s16(
  1073. vsetq_lane_s16(*(const int16_t *) p, vdupq_n_s16(0), 0));
  1074. }
  1075. // Load unaligned 64-bit integer from memory into the first element of dst.
  1076. //
  1077. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  1078. // dst[MAX:64] := 0
  1079. //
  1080. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si64
  1081. FORCE_INLINE __m128i _mm_loadu_si64(const void *p)
  1082. {
  1083. return vreinterpretq_m128i_s64(
  1084. vcombine_s64(vld1_s64((const int64_t *) p), vdup_n_s64(0)));
  1085. }
  1086. // Load a double-precision (64-bit) floating-point element from memory into the
  1087. // lower of dst, and zero the upper element. mem_addr does not need to be
  1088. // aligned on any particular boundary.
  1089. //
  1090. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  1091. // dst[127:64] := 0
  1092. //
  1093. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_sd
  1094. FORCE_INLINE __m128d _mm_load_sd(const double *p)
  1095. {
  1096. #if defined(__aarch64__)
  1097. return vreinterpretq_m128d_f64(vsetq_lane_f64(*p, vdupq_n_f64(0), 0));
  1098. #else
  1099. const float *fp = (const float *) p;
  1100. float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], 0, 0};
  1101. return vreinterpretq_m128d_f32(vld1q_f32(data));
  1102. #endif
  1103. }
// Loads two double-precision, floating-point values from 16-byte aligned
// memory.
  1106. //
  1107. // dst[127:0] := MEM[mem_addr+127:mem_addr]
  1108. //
  1109. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd
  1110. FORCE_INLINE __m128d _mm_load_pd(const double *p)
  1111. {
  1112. #if defined(__aarch64__)
  1113. return vreinterpretq_m128d_f64(vld1q_f64(p));
  1114. #else
  1115. const float *fp = (const float *) p;
  1116. float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], fp[2], fp[3]};
  1117. return vreinterpretq_m128d_f32(vld1q_f32(data));
  1118. #endif
  1119. }
// Loads two double-precision, floating-point values from unaligned memory.
  1121. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_pd
  1122. FORCE_INLINE __m128d _mm_loadu_pd(const double *p)
  1123. {
  1124. return _mm_load_pd(p);
  1125. }
// Loads a single-precision, floating-point value into the low word and
// clears the upper three words.
  1128. // https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx
  1129. FORCE_INLINE __m128 _mm_load_ss(const float *p)
  1130. {
  1131. return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0));
  1132. }
  1133. // Load 64-bit integer from memory into the first element of dst.
  1134. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_epi64
  1135. FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p)
  1136. {
  1137. /* Load the lower 64 bits of the value pointed to by p into the
  1138. * lower 64 bits of the result, zeroing the upper 64 bits of the result.
  1139. */
  1140. return vreinterpretq_m128i_s32(
  1141. vcombine_s32(vld1_s32((int32_t const *) p), vcreate_s32(0)));
  1142. }
  1143. // Load a double-precision (64-bit) floating-point element from memory into the
  1144. // lower element of dst, and copy the upper element from a to dst. mem_addr does
  1145. // not need to be aligned on any particular boundary.
  1146. //
  1147. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  1148. // dst[127:64] := a[127:64]
  1149. //
  1150. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_pd
  1151. FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p)
  1152. {
  1153. #if defined(__aarch64__)
  1154. return vreinterpretq_m128d_f64(
  1155. vcombine_f64(vld1_f64(p), vget_high_f64(vreinterpretq_f64_m128d(a))));
  1156. #else
  1157. return vreinterpretq_m128d_f32(
  1158. vcombine_f32(vld1_f32((const float *) p),
  1159. vget_high_f32(vreinterpretq_f32_m128d(a))));
  1160. #endif
  1161. }
  1162. // Load 2 double-precision (64-bit) floating-point elements from memory into dst
  1163. // in reverse order. mem_addr must be aligned on a 16-byte boundary or a
  1164. // general-protection exception may be generated.
  1165. //
  1166. // dst[63:0] := MEM[mem_addr+127:mem_addr+64]
  1167. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  1168. //
  1169. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_pd
  1170. FORCE_INLINE __m128d _mm_loadr_pd(const double *p)
  1171. {
  1172. #if defined(__aarch64__)
  1173. float64x2_t v = vld1q_f64(p);
  1174. return vreinterpretq_m128d_f64(vextq_f64(v, v, 1));
  1175. #else
  1176. int64x2_t v = vld1q_s64((const int64_t *) p);
  1177. return vreinterpretq_m128d_s64(vextq_s64(v, v, 1));
  1178. #endif
  1179. }
  1180. // Sets the low word to the single-precision, floating-point value of b
  1181. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/35hdzazd(v=vs.100)
  1182. FORCE_INLINE __m128 _mm_move_ss(__m128 a, __m128 b)
  1183. {
  1184. return vreinterpretq_m128_f32(
  1185. vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), 0),
  1186. vreinterpretq_f32_m128(a), 0));
  1187. }
  1188. // Move the lower double-precision (64-bit) floating-point element from b to the
  1189. // lower element of dst, and copy the upper element from a to the upper element
  1190. // of dst.
  1191. //
  1192. // dst[63:0] := b[63:0]
  1193. // dst[127:64] := a[127:64]
  1194. //
  1195. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_sd
  1196. FORCE_INLINE __m128d _mm_move_sd(__m128d a, __m128d b)
  1197. {
  1198. return vreinterpretq_m128d_f32(
  1199. vcombine_f32(vget_low_f32(vreinterpretq_f32_m128d(b)),
  1200. vget_high_f32(vreinterpretq_f32_m128d(a))));
  1201. }
  1202. // Copy the lower 64-bit integer in a to the lower element of dst, and zero the
  1203. // upper element.
  1204. //
  1205. // dst[63:0] := a[63:0]
  1206. // dst[127:64] := 0
  1207. //
  1208. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_epi64
  1209. FORCE_INLINE __m128i _mm_move_epi64(__m128i a)
  1210. {
  1211. return vreinterpretq_m128i_s64(
  1212. vsetq_lane_s64(0, vreinterpretq_s64_m128i(a), 1));
  1213. }
  1214. // Return vector of type __m128 with undefined elements.
  1215. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_ps
  1216. FORCE_INLINE __m128 _mm_undefined_ps(void)
  1217. {
  1218. #if defined(__GNUC__) || defined(__clang__)
  1219. #pragma GCC diagnostic push
  1220. #pragma GCC diagnostic ignored "-Wuninitialized"
  1221. #endif
  1222. __m128 a;
  1223. return a;
  1224. #if defined(__GNUC__) || defined(__clang__)
  1225. #pragma GCC diagnostic pop
  1226. #endif
  1227. }
  1228. /* Logic/Binary operations */
  1229. // Computes the bitwise AND-NOT of the four single-precision, floating-point
  1230. // values of a and b.
  1231. //
  1232. // r0 := ~a0 & b0
  1233. // r1 := ~a1 & b1
  1234. // r2 := ~a2 & b2
  1235. // r3 := ~a3 & b3
  1236. //
  1237. // https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx
  1238. FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
  1239. {
  1240. return vreinterpretq_m128_s32(
  1241. vbicq_s32(vreinterpretq_s32_m128(b),
  1242. vreinterpretq_s32_m128(a))); // *NOTE* argument swap
  1243. }
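// Usage sketch (illustrative comment, not part of the original header): a
// common use of _mm_andnot_ps is clearing the sign bit of every lane, i.e. a
// vectorized fabsf(); note that the first operand is the one that gets
// inverted. `x` stands for any __m128 input.
//
//   __m128 sign_mask = _mm_set1_ps(-0.0f);      // 0x80000000 in every lane
//   __m128 abs_x = _mm_andnot_ps(sign_mask, x); // (~sign_mask) & x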
  1244. // Compute the bitwise NOT of packed double-precision (64-bit) floating-point
  1245. // elements in a and then AND with b, and store the results in dst.
  1246. //
  1247. // FOR j := 0 to 1
  1248. // i := j*64
  1249. // dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
  1250. // ENDFOR
  1251. //
  1252. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_andnot_pd
  1253. FORCE_INLINE __m128d _mm_andnot_pd(__m128d a, __m128d b)
  1254. {
  1255. // *NOTE* argument swap
  1256. return vreinterpretq_m128d_s64(
  1257. vbicq_s64(vreinterpretq_s64_m128d(b), vreinterpretq_s64_m128d(a)));
  1258. }
  1259. // Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the
  1260. // 128-bit value in a.
  1261. //
  1262. // r := (~a) & b
  1263. //
  1264. // https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx
  1265. FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
  1266. {
  1267. return vreinterpretq_m128i_s32(
  1268. vbicq_s32(vreinterpretq_s32_m128i(b),
  1269. vreinterpretq_s32_m128i(a))); // *NOTE* argument swap
  1270. }
  1271. // Computes the bitwise AND of the 128-bit value in a and the 128-bit value in
  1272. // b.
  1273. //
  1274. // r := a & b
  1275. //
  1276. // https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx
  1277. FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
  1278. {
  1279. return vreinterpretq_m128i_s32(
  1280. vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  1281. }
  1282. // Computes the bitwise AND of the four single-precision, floating-point values
  1283. // of a and b.
  1284. //
  1285. // r0 := a0 & b0
  1286. // r1 := a1 & b1
  1287. // r2 := a2 & b2
  1288. // r3 := a3 & b3
  1289. //
  1290. // https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx
  1291. FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
  1292. {
  1293. return vreinterpretq_m128_s32(
  1294. vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
  1295. }
  1296. // Compute the bitwise AND of packed double-precision (64-bit) floating-point
  1297. // elements in a and b, and store the results in dst.
  1298. //
  1299. // FOR j := 0 to 1
  1300. // i := j*64
  1301. // dst[i+63:i] := a[i+63:i] AND b[i+63:i]
  1302. // ENDFOR
  1303. //
  1304. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_and_pd
  1305. FORCE_INLINE __m128d _mm_and_pd(__m128d a, __m128d b)
  1306. {
  1307. return vreinterpretq_m128d_s64(
  1308. vandq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
  1309. }
  1310. // Computes the bitwise OR of the four single-precision, floating-point values
  1311. // of a and b.
  1312. // https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx
  1313. FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
  1314. {
  1315. return vreinterpretq_m128_s32(
  1316. vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
  1317. }
  1318. // Computes bitwise EXOR (exclusive-or) of the four single-precision,
  1319. // floating-point values of a and b.
  1320. // https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx
  1321. FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
  1322. {
  1323. return vreinterpretq_m128_s32(
  1324. veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
  1325. }
  1326. // Compute the bitwise XOR of packed double-precision (64-bit) floating-point
  1327. // elements in a and b, and store the results in dst.
  1328. //
  1329. // FOR j := 0 to 1
  1330. // i := j*64
  1331. // dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
  1332. // ENDFOR
  1333. //
  1334. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_xor_pd
  1335. FORCE_INLINE __m128d _mm_xor_pd(__m128d a, __m128d b)
  1336. {
  1337. return vreinterpretq_m128d_s64(
  1338. veorq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
  1339. }
  1340. // Compute the bitwise OR of packed double-precision (64-bit) floating-point
  1341. // elements in a and b, and store the results in dst.
  1342. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_or_pd
  1343. FORCE_INLINE __m128d _mm_or_pd(__m128d a, __m128d b)
  1344. {
  1345. return vreinterpretq_m128d_s64(
  1346. vorrq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
  1347. }
  1348. // Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b.
  1349. //
  1350. // r := a | b
  1351. //
  1352. // https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx
  1353. FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
  1354. {
  1355. return vreinterpretq_m128i_s32(
  1356. vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  1357. }
  1358. // Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in
  1359. // b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx
  1360. FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
  1361. {
  1362. return vreinterpretq_m128i_s32(
  1363. veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  1364. }
  1365. // Duplicate the low double-precision (64-bit) floating-point element from a,
  1366. // and store the results in dst.
  1367. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movedup_pd
  1368. FORCE_INLINE __m128d _mm_movedup_pd(__m128d a)
  1369. {
#if defined(__aarch64__)
  1371. return vreinterpretq_m128d_f64(
  1372. vdupq_laneq_f64(vreinterpretq_f64_m128d(a), 0));
  1373. #else
  1374. return vreinterpretq_m128d_u64(
  1375. vdupq_n_u64(vgetq_lane_u64(vreinterpretq_u64_m128d(a), 0)));
  1376. #endif
  1377. }
  1378. // Duplicate odd-indexed single-precision (32-bit) floating-point elements
  1379. // from a, and store the results in dst.
  1380. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movehdup_ps
  1381. FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a)
  1382. {
  1383. #if __has_builtin(__builtin_shufflevector)
  1384. return vreinterpretq_m128_f32(__builtin_shufflevector(
  1385. vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 1, 1, 3, 3));
  1386. #else
  1387. float32_t a1 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
  1388. float32_t a3 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 3);
  1389. float ALIGN_STRUCT(16) data[4] = {a1, a1, a3, a3};
  1390. return vreinterpretq_m128_f32(vld1q_f32(data));
  1391. #endif
  1392. }
  1393. // Duplicate even-indexed single-precision (32-bit) floating-point elements
  1394. // from a, and store the results in dst.
  1395. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_moveldup_ps
  1396. FORCE_INLINE __m128 _mm_moveldup_ps(__m128 a)
  1397. {
  1398. #if __has_builtin(__builtin_shufflevector)
  1399. return vreinterpretq_m128_f32(__builtin_shufflevector(
  1400. vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 0, 0, 2, 2));
  1401. #else
  1402. float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  1403. float32_t a2 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 2);
  1404. float ALIGN_STRUCT(16) data[4] = {a0, a0, a2, a2};
  1405. return vreinterpretq_m128_f32(vld1q_f32(data));
  1406. #endif
  1407. }
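// Usage sketch (illustrative comment, not part of the original header): for
// {1, 2, 3, 4} the two duplication intrinsics above give
//
//   __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
//   __m128 even = _mm_moveldup_ps(v); // {1, 1, 3, 3}
//   __m128 odd = _mm_movehdup_ps(v);  // {2, 2, 4, 4}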
  1408. // Moves the upper two values of B into the lower two values of A.
  1409. //
  1410. // r3 := a3
  1411. // r2 := a2
  1412. // r1 := b3
  1413. // r0 := b2
  1414. FORCE_INLINE __m128 _mm_movehl_ps(__m128 __A, __m128 __B)
  1415. {
  1416. float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(__A));
  1417. float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(__B));
  1418. return vreinterpretq_m128_f32(vcombine_f32(b32, a32));
  1419. }
  1420. // Moves the lower two values of B into the upper two values of A.
  1421. //
  1422. // r3 := b1
  1423. // r2 := b0
  1424. // r1 := a1
  1425. // r0 := a0
  1426. FORCE_INLINE __m128 _mm_movelh_ps(__m128 __A, __m128 __B)
  1427. {
  1428. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(__A));
  1429. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(__B));
  1430. return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
  1431. }
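// Usage sketch (illustrative comment, not part of the original header):
// _mm_movehl_ps is handy for horizontal reductions; the helper name `hsum_ps`
// is hypothetical, and _mm_add_ps, _mm_add_ss, _mm_shuffle_ps and _mm_store_ss
// are assumed to be available from this header.
//
//   static inline float hsum_ps(__m128 v)
//   {
//       __m128 high = _mm_movehl_ps(v, v); // {v2, v3, v2, v3}
//       __m128 sum2 = _mm_add_ps(v, high); // lane 0 = v0+v2, lane 1 = v1+v3
//       __m128 sum1 = _mm_add_ss(
//           sum2, _mm_shuffle_ps(sum2, sum2, _MM_SHUFFLE(1, 1, 1, 1)));
//       float out;
//       _mm_store_ss(&out, sum1);          // v0+v1+v2+v3
//       return out;
//   }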
  1432. // Compute the absolute value of packed signed 32-bit integers in a, and store
  1433. // the unsigned results in dst.
  1434. //
  1435. // FOR j := 0 to 3
  1436. // i := j*32
  1437. // dst[i+31:i] := ABS(a[i+31:i])
  1438. // ENDFOR
  1439. //
  1440. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi32
  1441. FORCE_INLINE __m128i _mm_abs_epi32(__m128i a)
  1442. {
  1443. return vreinterpretq_m128i_s32(vabsq_s32(vreinterpretq_s32_m128i(a)));
  1444. }
  1445. // Compute the absolute value of packed signed 16-bit integers in a, and store
  1446. // the unsigned results in dst.
  1447. //
  1448. // FOR j := 0 to 7
  1449. // i := j*16
  1450. // dst[i+15:i] := ABS(a[i+15:i])
  1451. // ENDFOR
  1452. //
  1453. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi16
  1454. FORCE_INLINE __m128i _mm_abs_epi16(__m128i a)
  1455. {
  1456. return vreinterpretq_m128i_s16(vabsq_s16(vreinterpretq_s16_m128i(a)));
  1457. }
  1458. // Compute the absolute value of packed signed 8-bit integers in a, and store
  1459. // the unsigned results in dst.
  1460. //
  1461. // FOR j := 0 to 15
  1462. // i := j*8
  1463. // dst[i+7:i] := ABS(a[i+7:i])
  1464. // ENDFOR
  1465. //
  1466. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi8
  1467. FORCE_INLINE __m128i _mm_abs_epi8(__m128i a)
  1468. {
  1469. return vreinterpretq_m128i_s8(vabsq_s8(vreinterpretq_s8_m128i(a)));
  1470. }
  1471. // Compute the absolute value of packed signed 32-bit integers in a, and store
  1472. // the unsigned results in dst.
  1473. //
  1474. // FOR j := 0 to 1
  1475. // i := j*32
  1476. // dst[i+31:i] := ABS(a[i+31:i])
  1477. // ENDFOR
  1478. //
  1479. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi32
  1480. FORCE_INLINE __m64 _mm_abs_pi32(__m64 a)
  1481. {
  1482. return vreinterpret_m64_s32(vabs_s32(vreinterpret_s32_m64(a)));
  1483. }
  1484. // Compute the absolute value of packed signed 16-bit integers in a, and store
  1485. // the unsigned results in dst.
  1486. //
  1487. // FOR j := 0 to 3
  1488. // i := j*16
  1489. // dst[i+15:i] := ABS(a[i+15:i])
  1490. // ENDFOR
  1491. //
  1492. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi16
  1493. FORCE_INLINE __m64 _mm_abs_pi16(__m64 a)
  1494. {
  1495. return vreinterpret_m64_s16(vabs_s16(vreinterpret_s16_m64(a)));
  1496. }
  1497. // Compute the absolute value of packed signed 8-bit integers in a, and store
  1498. // the unsigned results in dst.
  1499. //
  1500. // FOR j := 0 to 7
  1501. // i := j*8
  1502. // dst[i+7:i] := ABS(a[i+7:i])
  1503. // ENDFOR
  1504. //
  1505. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi8
  1506. FORCE_INLINE __m64 _mm_abs_pi8(__m64 a)
  1507. {
  1508. return vreinterpret_m64_s8(vabs_s8(vreinterpret_s8_m64(a)));
  1509. }
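// Usage sketch (illustrative comment, not part of the original header): each
// abs intrinsic negates the negative lanes of its input, e.g.
//
//   __m128i v = _mm_setr_epi32(-1, 2, -3, 4);
//   __m128i r = _mm_abs_epi32(v); // r = {1, 2, 3, 4}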
  1510. // Concatenate 16-byte blocks in a and b into a 32-byte temporary result, shift
  1511. // the result right by imm8 bytes, and store the low 16 bytes in dst.
  1512. //
  1513. // tmp[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8)
  1514. // dst[127:0] := tmp[127:0]
  1515. //
  1516. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_epi8
  1517. #define _mm_alignr_epi8(a, b, imm) \
  1518. __extension__({ \
  1519. __m128i ret; \
  1520. if (unlikely((imm) >= 32)) { \
  1521. ret = _mm_setzero_si128(); \
  1522. } else { \
  1523. uint8x16_t tmp_low, tmp_high; \
  1524. if (imm >= 16) { \
  1525. const int idx = imm - 16; \
  1526. tmp_low = vreinterpretq_u8_m128i(a); \
  1527. tmp_high = vdupq_n_u8(0); \
  1528. ret = \
  1529. vreinterpretq_m128i_u8(vextq_u8(tmp_low, tmp_high, idx)); \
  1530. } else { \
  1531. const int idx = imm; \
  1532. tmp_low = vreinterpretq_u8_m128i(b); \
  1533. tmp_high = vreinterpretq_u8_m128i(a); \
  1534. ret = \
  1535. vreinterpretq_m128i_u8(vextq_u8(tmp_low, tmp_high, idx)); \
  1536. } \
  1537. } \
  1538. ret; \
  1539. })
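// Usage sketch (illustrative comment, not part of the original header):
// _mm_alignr_epi8 shifts the byte concatenation a:b right, which is useful for
// sliding-window accesses; with imm = 4 the low 12 bytes of the result come
// from b[4..15] and the top 4 bytes from a[0..3].
//
//   __m128i a = _mm_set1_epi8(0x11);
//   __m128i b = _mm_set1_epi8(0x22);
//   __m128i r = _mm_alignr_epi8(a, b, 4); // bytes 0..11 = 0x22, 12..15 = 0x11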
  1540. // Concatenate 8-byte blocks in a and b into a 16-byte temporary result, shift
  1541. // the result right by imm8 bytes, and store the low 8 bytes in dst.
  1542. //
  1543. // tmp[127:0] := ((a[63:0] << 64)[127:0] OR b[63:0]) >> (imm8*8)
  1544. // dst[63:0] := tmp[63:0]
  1545. //
  1546. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_pi8
  1547. #define _mm_alignr_pi8(a, b, imm) \
  1548. __extension__({ \
  1549. __m64 ret; \
  1550. if (unlikely((imm) >= 16)) { \
  1551. ret = vreinterpret_m64_s8(vdup_n_s8(0)); \
  1552. } else { \
  1553. uint8x8_t tmp_low, tmp_high; \
  1554. if (imm >= 8) { \
  1555. const int idx = imm - 8; \
  1556. tmp_low = vreinterpret_u8_m64(a); \
  1557. tmp_high = vdup_n_u8(0); \
  1558. ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
  1559. } else { \
  1560. const int idx = imm; \
  1561. tmp_low = vreinterpret_u8_m64(b); \
  1562. tmp_high = vreinterpret_u8_m64(a); \
  1563. ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
  1564. } \
  1565. } \
  1566. ret; \
  1567. })
  1568. // Takes the upper 64 bits of a and places it in the low end of the result
  1569. // Takes the lower 64 bits of b and places it into the high end of the result.
  1570. FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
  1571. {
  1572. float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
  1573. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
  1574. return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
  1575. }
// takes the lower two 32-bit values from a, swaps them and places them in the
// low end of the result; takes the higher two 32-bit values from b, swaps them
// and places them in the high end of the result.
  1579. FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
  1580. {
  1581. float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
  1582. float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
  1583. return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
  1584. }
  1585. FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
  1586. {
  1587. float32x2_t a21 = vget_high_f32(
  1588. vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
  1589. float32x2_t b03 = vget_low_f32(
  1590. vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
  1591. return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
  1592. }
  1593. FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
  1594. {
  1595. float32x2_t a03 = vget_low_f32(
  1596. vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
  1597. float32x2_t b21 = vget_high_f32(
  1598. vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
  1599. return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
  1600. }
  1601. FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
  1602. {
  1603. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
  1604. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
  1605. return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
  1606. }
  1607. FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
  1608. {
  1609. float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
  1610. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
  1611. return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
  1612. }
  1613. FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
  1614. {
  1615. float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
  1616. float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
  1617. return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
  1618. }
// keeps the low 64 bits of a in the low end and puts the high 64 bits of b in
// the high end
  1621. FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
  1622. {
  1623. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
  1624. float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
  1625. return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
  1626. }
  1627. FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
  1628. {
  1629. float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
  1630. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  1631. return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
  1632. }
  1633. FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
  1634. {
  1635. float32x2_t a22 =
  1636. vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
  1637. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  1638. return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
  1639. }
  1640. FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
  1641. {
  1642. float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
  1643. float32x2_t b22 =
  1644. vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
  1645. return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
  1646. }
  1647. FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
  1648. {
  1649. float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  1650. float32x2_t a22 =
  1651. vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
  1652. float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip ?*/
  1653. float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
  1654. return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
  1655. }
  1656. FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
  1657. {
  1658. float32x2_t a33 =
  1659. vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
  1660. float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
  1661. return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
  1662. }
  1663. FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
  1664. {
  1665. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
  1666. float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
  1667. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  1668. float32x2_t b20 = vset_lane_f32(b2, b00, 1);
  1669. return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
  1670. }
  1671. FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
  1672. {
  1673. float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
  1675. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  1676. float32x2_t b20 = vset_lane_f32(b2, b00, 1);
  1677. return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
  1678. }
  1679. FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
  1680. {
  1681. float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
  1683. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  1684. float32x2_t b20 = vset_lane_f32(b2, b00, 1);
  1685. return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
  1686. }
  1687. // NEON does not support a general purpose permute intrinsic
  1688. // Selects four specific single-precision, floating-point values from a and b,
  1689. // based on the mask i.
  1690. //
  1691. // C equivalent:
  1692. // __m128 _mm_shuffle_ps_default(__m128 a, __m128 b,
  1693. // __constrange(0, 255) int imm) {
  1694. // __m128 ret;
  1695. // ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
  1696. // ret[2] = b[(imm >> 4) & 0x03]; ret[3] = b[(imm >> 6) & 0x03];
  1697. // return ret;
  1698. // }
  1699. //
  1700. // https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx
  1701. #define _mm_shuffle_ps_default(a, b, imm) \
  1702. __extension__({ \
  1703. float32x4_t ret; \
  1704. ret = vmovq_n_f32( \
  1705. vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))); \
  1706. ret = vsetq_lane_f32( \
  1707. vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \
  1708. ret, 1); \
  1709. ret = vsetq_lane_f32( \
  1710. vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \
  1711. ret, 2); \
  1712. ret = vsetq_lane_f32( \
  1713. vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \
  1714. ret, 3); \
  1715. vreinterpretq_m128_f32(ret); \
  1716. })
  1717. // FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255)
  1718. // int imm)
  1719. #if __has_builtin(__builtin_shufflevector)
  1720. #define _mm_shuffle_ps(a, b, imm) \
  1721. __extension__({ \
  1722. float32x4_t _input1 = vreinterpretq_f32_m128(a); \
  1723. float32x4_t _input2 = vreinterpretq_f32_m128(b); \
  1724. float32x4_t _shuf = __builtin_shufflevector( \
  1725. _input1, _input2, (imm) & (0x3), ((imm) >> 2) & 0x3, \
  1726. (((imm) >> 4) & 0x3) + 4, (((imm) >> 6) & 0x3) + 4); \
  1727. vreinterpretq_m128_f32(_shuf); \
  1728. })
  1729. #else // generic
  1730. #define _mm_shuffle_ps(a, b, imm) \
  1731. __extension__({ \
  1732. __m128 ret; \
  1733. switch (imm) { \
  1734. case _MM_SHUFFLE(1, 0, 3, 2): \
  1735. ret = _mm_shuffle_ps_1032((a), (b)); \
  1736. break; \
  1737. case _MM_SHUFFLE(2, 3, 0, 1): \
  1738. ret = _mm_shuffle_ps_2301((a), (b)); \
  1739. break; \
  1740. case _MM_SHUFFLE(0, 3, 2, 1): \
  1741. ret = _mm_shuffle_ps_0321((a), (b)); \
  1742. break; \
  1743. case _MM_SHUFFLE(2, 1, 0, 3): \
  1744. ret = _mm_shuffle_ps_2103((a), (b)); \
  1745. break; \
  1746. case _MM_SHUFFLE(1, 0, 1, 0): \
  1747. ret = _mm_movelh_ps((a), (b)); \
  1748. break; \
  1749. case _MM_SHUFFLE(1, 0, 0, 1): \
  1750. ret = _mm_shuffle_ps_1001((a), (b)); \
  1751. break; \
  1752. case _MM_SHUFFLE(0, 1, 0, 1): \
  1753. ret = _mm_shuffle_ps_0101((a), (b)); \
  1754. break; \
  1755. case _MM_SHUFFLE(3, 2, 1, 0): \
  1756. ret = _mm_shuffle_ps_3210((a), (b)); \
  1757. break; \
  1758. case _MM_SHUFFLE(0, 0, 1, 1): \
  1759. ret = _mm_shuffle_ps_0011((a), (b)); \
  1760. break; \
  1761. case _MM_SHUFFLE(0, 0, 2, 2): \
  1762. ret = _mm_shuffle_ps_0022((a), (b)); \
  1763. break; \
  1764. case _MM_SHUFFLE(2, 2, 0, 0): \
  1765. ret = _mm_shuffle_ps_2200((a), (b)); \
  1766. break; \
  1767. case _MM_SHUFFLE(3, 2, 0, 2): \
  1768. ret = _mm_shuffle_ps_3202((a), (b)); \
  1769. break; \
  1770. case _MM_SHUFFLE(3, 2, 3, 2): \
  1771. ret = _mm_movehl_ps((b), (a)); \
  1772. break; \
  1773. case _MM_SHUFFLE(1, 1, 3, 3): \
  1774. ret = _mm_shuffle_ps_1133((a), (b)); \
  1775. break; \
  1776. case _MM_SHUFFLE(2, 0, 1, 0): \
  1777. ret = _mm_shuffle_ps_2010((a), (b)); \
  1778. break; \
  1779. case _MM_SHUFFLE(2, 0, 0, 1): \
  1780. ret = _mm_shuffle_ps_2001((a), (b)); \
  1781. break; \
  1782. case _MM_SHUFFLE(2, 0, 3, 2): \
  1783. ret = _mm_shuffle_ps_2032((a), (b)); \
  1784. break; \
  1785. default: \
  1786. ret = _mm_shuffle_ps_default((a), (b), (imm)); \
  1787. break; \
  1788. } \
  1789. ret; \
  1790. })
  1791. #endif
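// Usage sketch (illustrative comment, not part of the original header):
// _MM_SHUFFLE(z, y, x, w) places lane w of the first operand in lane 0, lane x
// in lane 1, and lanes y and z of the second operand in lanes 2 and 3, so
// reversing a single vector looks like this:
//
//   __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
//   __m128 rev = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 1, 2, 3)); // {4, 3, 2, 1}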
  1792. // Takes the upper 64 bits of a and places it in the low end of the result
  1793. // Takes the lower 64 bits of a and places it into the high end of the result.
  1794. FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
  1795. {
  1796. int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
  1797. int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
  1798. return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
  1799. }
// takes the lower two 32-bit values from a, swaps them and places them in the
// low end of the result; takes the higher two 32-bit values from a, swaps them
// and places them in the high end of the result.
  1803. FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
  1804. {
  1805. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  1806. int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
  1807. return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
  1808. }
// rotates the least significant 32 bits into the most significant 32 bits, and
// shifts the rest down
  1811. FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
  1812. {
  1813. return vreinterpretq_m128i_s32(
  1814. vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1));
  1815. }
// rotates the most significant 32 bits into the least significant 32 bits, and
// shifts the rest up
  1818. FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a)
  1819. {
  1820. return vreinterpretq_m128i_s32(
  1821. vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3));
  1822. }
  1823. // gets the lower 64 bits of a, and places it in the upper 64 bits
  1824. // gets the lower 64 bits of a and places it in the lower 64 bits
  1825. FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a)
  1826. {
  1827. int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
  1828. return vreinterpretq_m128i_s32(vcombine_s32(a10, a10));
  1829. }
// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in
// the lower 64 bits; gets the lower 64 bits of a and places it in the upper
// 64 bits
  1832. FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a)
  1833. {
  1834. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  1835. int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
  1836. return vreinterpretq_m128i_s32(vcombine_s32(a01, a10));
  1837. }
// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in
// the upper 64 bits; gets the lower 64 bits of a, swaps the 0 and 1 elements,
// and places them in the lower 64 bits
  1841. FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a)
  1842. {
  1843. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  1844. return vreinterpretq_m128i_s32(vcombine_s32(a01, a01));
  1845. }
  1846. FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a)
  1847. {
  1848. int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1);
  1849. int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
  1850. return vreinterpretq_m128i_s32(vcombine_s32(a11, a22));
  1851. }
  1852. FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a)
  1853. {
  1854. int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
  1855. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  1856. return vreinterpretq_m128i_s32(vcombine_s32(a22, a01));
  1857. }
  1858. FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a)
  1859. {
  1860. int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
  1861. int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1);
  1862. return vreinterpretq_m128i_s32(vcombine_s32(a32, a33));
  1863. }
  1864. // Shuffle packed 8-bit integers in a according to shuffle control mask in the
  1865. // corresponding 8-bit element of b, and store the results in dst.
  1866. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_epi8
  1867. FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b)
  1868. {
  1869. int8x16_t tbl = vreinterpretq_s8_m128i(a); // input a
  1870. uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b
  1871. uint8x16_t idx_masked =
  1872. vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits
  1873. #if defined(__aarch64__)
  1874. return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked));
  1875. #elif defined(__GNUC__)
  1876. int8x16_t ret;
  1877. // %e and %f represent the even and odd D registers
  1878. // respectively.
  1879. __asm__ __volatile__(
  1880. "vtbl.8 %e[ret], {%e[tbl], %f[tbl]}, %e[idx]\n"
  1881. "vtbl.8 %f[ret], {%e[tbl], %f[tbl]}, %f[idx]\n"
  1882. : [ret] "=&w"(ret)
  1883. : [tbl] "w"(tbl), [idx] "w"(idx_masked));
  1884. return vreinterpretq_m128i_s8(ret);
  1885. #else
// Generic fallback when neither the AArch64 path nor GCC inline assembly is
// available: emulate the 16-byte table lookup with two vtbl2 lookups.
  1887. int8x8x2_t a_split = {vget_low_s8(tbl), vget_high_s8(tbl)};
  1888. return vreinterpretq_m128i_s8(
  1889. vcombine_s8(vtbl2_s8(a_split, vget_low_u8(idx_masked)),
  1890. vtbl2_s8(a_split, vget_high_u8(idx_masked))));
  1891. #endif
  1892. }
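// Usage sketch (illustrative comment, not part of the original header): a
// classic _mm_shuffle_epi8 use is byte-swapping every 32-bit lane; any control
// byte with its top bit set zeroes the corresponding output byte. `x` stands
// for any __m128i input.
//
//   __m128i bswap32_mask = _mm_setr_epi8(3, 2, 1, 0, 7, 6, 5, 4,
//                                        11, 10, 9, 8, 15, 14, 13, 12);
//   __m128i swapped = _mm_shuffle_epi8(x, bswap32_mask);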
  1893. // C equivalent:
  1894. // __m128i _mm_shuffle_epi32_default(__m128i a,
  1895. // __constrange(0, 255) int imm) {
  1896. // __m128i ret;
  1897. // ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
  1898. // ret[2] = a[(imm >> 4) & 0x03]; ret[3] = a[(imm >> 6) & 0x03];
  1899. // return ret;
  1900. // }
  1901. #define _mm_shuffle_epi32_default(a, imm) \
  1902. __extension__({ \
  1903. int32x4_t ret; \
  1904. ret = vmovq_n_s32( \
  1905. vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm) & (0x3))); \
  1906. ret = vsetq_lane_s32( \
  1907. vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 2) & 0x3), \
  1908. ret, 1); \
  1909. ret = vsetq_lane_s32( \
  1910. vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), \
  1911. ret, 2); \
  1912. ret = vsetq_lane_s32( \
  1913. vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \
  1914. ret, 3); \
  1915. vreinterpretq_m128i_s32(ret); \
  1916. })
  1917. // FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a, __constrange(0,255)
  1918. // int imm)
  1919. #if defined(__aarch64__)
  1920. #define _mm_shuffle_epi32_splat(a, imm) \
  1921. __extension__({ \
  1922. vreinterpretq_m128i_s32( \
  1923. vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))); \
  1924. })
  1925. #else
  1926. #define _mm_shuffle_epi32_splat(a, imm) \
  1927. __extension__({ \
  1928. vreinterpretq_m128i_s32( \
  1929. vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))); \
  1930. })
  1931. #endif
  1932. // Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm.
  1933. // https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx
  1934. // FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a,
  1935. // __constrange(0,255) int imm)
  1936. #if __has_builtin(__builtin_shufflevector)
  1937. #define _mm_shuffle_epi32(a, imm) \
  1938. __extension__({ \
  1939. int32x4_t _input = vreinterpretq_s32_m128i(a); \
  1940. int32x4_t _shuf = __builtin_shufflevector( \
  1941. _input, _input, (imm) & (0x3), ((imm) >> 2) & 0x3, \
  1942. ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); \
  1943. vreinterpretq_m128i_s32(_shuf); \
  1944. })
  1945. #else // generic
  1946. #define _mm_shuffle_epi32(a, imm) \
  1947. __extension__({ \
  1948. __m128i ret; \
  1949. switch (imm) { \
  1950. case _MM_SHUFFLE(1, 0, 3, 2): \
  1951. ret = _mm_shuffle_epi_1032((a)); \
  1952. break; \
  1953. case _MM_SHUFFLE(2, 3, 0, 1): \
  1954. ret = _mm_shuffle_epi_2301((a)); \
  1955. break; \
  1956. case _MM_SHUFFLE(0, 3, 2, 1): \
  1957. ret = _mm_shuffle_epi_0321((a)); \
  1958. break; \
  1959. case _MM_SHUFFLE(2, 1, 0, 3): \
  1960. ret = _mm_shuffle_epi_2103((a)); \
  1961. break; \
  1962. case _MM_SHUFFLE(1, 0, 1, 0): \
  1963. ret = _mm_shuffle_epi_1010((a)); \
  1964. break; \
  1965. case _MM_SHUFFLE(1, 0, 0, 1): \
  1966. ret = _mm_shuffle_epi_1001((a)); \
  1967. break; \
  1968. case _MM_SHUFFLE(0, 1, 0, 1): \
  1969. ret = _mm_shuffle_epi_0101((a)); \
  1970. break; \
  1971. case _MM_SHUFFLE(2, 2, 1, 1): \
  1972. ret = _mm_shuffle_epi_2211((a)); \
  1973. break; \
  1974. case _MM_SHUFFLE(0, 1, 2, 2): \
  1975. ret = _mm_shuffle_epi_0122((a)); \
  1976. break; \
  1977. case _MM_SHUFFLE(3, 3, 3, 2): \
  1978. ret = _mm_shuffle_epi_3332((a)); \
  1979. break; \
  1980. case _MM_SHUFFLE(0, 0, 0, 0): \
  1981. ret = _mm_shuffle_epi32_splat((a), 0); \
  1982. break; \
  1983. case _MM_SHUFFLE(1, 1, 1, 1): \
  1984. ret = _mm_shuffle_epi32_splat((a), 1); \
  1985. break; \
  1986. case _MM_SHUFFLE(2, 2, 2, 2): \
  1987. ret = _mm_shuffle_epi32_splat((a), 2); \
  1988. break; \
  1989. case _MM_SHUFFLE(3, 3, 3, 3): \
  1990. ret = _mm_shuffle_epi32_splat((a), 3); \
  1991. break; \
  1992. default: \
  1993. ret = _mm_shuffle_epi32_default((a), (imm)); \
  1994. break; \
  1995. } \
  1996. ret; \
  1997. })
  1998. #endif
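// Usage sketch (illustrative only): _MM_SHUFFLE(z, y, x, w) picks the source
// lanes for dst[3], dst[2], dst[1], dst[0] respectively, so reversing the four
// 32-bit lanes looks like this (assuming the _mm_set_epi32 helper defined
// elsewhere in this file):
//   __m128i v = _mm_set_epi32(3, 2, 1, 0); // v[3]=3, v[2]=2, v[1]=1, v[0]=0
//   __m128i r = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3));
//   // r[0]=v[3]=3, r[1]=v[2]=2, r[2]=v[1]=1, r[3]=v[0]=0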
  1999. // Shuffles the lower 4 signed or unsigned 16-bit integers in a as specified
  2000. // by imm.
  2001. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/y41dkk37(v=vs.100)
  2002. // FORCE_INLINE __m128i _mm_shufflelo_epi16_function(__m128i a,
  2003. // __constrange(0,255) int
  2004. // imm)
  2005. #define _mm_shufflelo_epi16_function(a, imm) \
  2006. __extension__({ \
  2007. int16x8_t ret = vreinterpretq_s16_m128i(a); \
  2008. int16x4_t lowBits = vget_low_s16(ret); \
  2009. ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), ret, 0); \
  2010. ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, \
  2011. 1); \
  2012. ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 4) & 0x3), ret, \
  2013. 2); \
  2014. ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, \
  2015. 3); \
  2016. vreinterpretq_m128i_s16(ret); \
  2017. })
  2018. // FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a,
  2019. // __constrange(0,255) int imm)
  2020. #if __has_builtin(__builtin_shufflevector)
  2021. #define _mm_shufflelo_epi16(a, imm) \
  2022. __extension__({ \
  2023. int16x8_t _input = vreinterpretq_s16_m128i(a); \
  2024. int16x8_t _shuf = __builtin_shufflevector( \
  2025. _input, _input, ((imm) & (0x3)), (((imm) >> 2) & 0x3), \
  2026. (((imm) >> 4) & 0x3), (((imm) >> 6) & 0x3), 4, 5, 6, 7); \
  2027. vreinterpretq_m128i_s16(_shuf); \
  2028. })
  2029. #else // generic
  2030. #define _mm_shufflelo_epi16(a, imm) _mm_shufflelo_epi16_function((a), (imm))
  2031. #endif
  2032. // Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified
  2033. // by imm.
  2034. // https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx
  2035. // FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a,
  2036. // __constrange(0,255) int
  2037. // imm)
  2038. #define _mm_shufflehi_epi16_function(a, imm) \
  2039. __extension__({ \
  2040. int16x8_t ret = vreinterpretq_s16_m128i(a); \
  2041. int16x4_t highBits = vget_high_s16(ret); \
  2042. ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), ret, 4); \
  2043. ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, \
  2044. 5); \
  2045. ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, \
  2046. 6); \
  2047. ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, \
  2048. 7); \
  2049. vreinterpretq_m128i_s16(ret); \
  2050. })
  2051. // FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a,
  2052. // __constrange(0,255) int imm)
  2053. #if __has_builtin(__builtin_shufflevector)
  2054. #define _mm_shufflehi_epi16(a, imm) \
  2055. __extension__({ \
  2056. int16x8_t _input = vreinterpretq_s16_m128i(a); \
  2057. int16x8_t _shuf = __builtin_shufflevector( \
  2058. _input, _input, 0, 1, 2, 3, ((imm) & (0x3)) + 4, \
  2059. (((imm) >> 2) & 0x3) + 4, (((imm) >> 4) & 0x3) + 4, \
  2060. (((imm) >> 6) & 0x3) + 4); \
  2061. vreinterpretq_m128i_s16(_shuf); \
  2062. })
  2063. #else // generic
  2064. #define _mm_shufflehi_epi16(a, imm) _mm_shufflehi_epi16_function((a), (imm))
  2065. #endif
  2066. // Shuffle double-precision (64-bit) floating-point elements using the control
  2067. // in imm8, and store the results in dst.
  2068. //
  2069. // dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
  2070. // dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
  2071. //
  2072. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_pd
  2073. #if __has_builtin(__builtin_shufflevector)
  2074. #define _mm_shuffle_pd(a, b, imm8) \
  2075. vreinterpretq_m128d_s64(__builtin_shufflevector( \
  2076. vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b), imm8 & 0x1, \
  2077. ((imm8 & 0x2) >> 1) + 2))
  2078. #else
  2079. #define _mm_shuffle_pd(a, b, imm8) \
  2080. _mm_castsi128_pd(_mm_set_epi64x( \
  2081. vgetq_lane_s64(vreinterpretq_s64_m128d(b), (imm8 & 0x2) >> 1), \
  2082. vgetq_lane_s64(vreinterpretq_s64_m128d(a), imm8 & 0x1)))
  2083. #endif
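// Usage sketch (illustrative only): imm8 bit 0 selects the lane taken from a,
// bit 1 the lane taken from b, e.g.
//   __m128d r = _mm_shuffle_pd(a, b, 0x1); // r[63:0] = a[127:64], r[127:64] = b[63:0]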
  2084. // Blend packed 16-bit integers from a and b using control mask imm8, and store
  2085. // the results in dst.
  2086. //
  2087. // FOR j := 0 to 7
  2088. // i := j*16
  2089. // IF imm8[j]
  2090. // dst[i+15:i] := b[i+15:i]
  2091. // ELSE
  2092. // dst[i+15:i] := a[i+15:i]
  2093. // FI
  2094. // ENDFOR
  2095. // FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b,
  2096. // __constrange(0,255) int imm)
  2097. #define _mm_blend_epi16(a, b, imm) \
  2098. __extension__({ \
  2099. const uint16_t _mask[8] = {((imm) & (1 << 0)) ? 0xFFFF : 0x0000, \
  2100. ((imm) & (1 << 1)) ? 0xFFFF : 0x0000, \
  2101. ((imm) & (1 << 2)) ? 0xFFFF : 0x0000, \
  2102. ((imm) & (1 << 3)) ? 0xFFFF : 0x0000, \
  2103. ((imm) & (1 << 4)) ? 0xFFFF : 0x0000, \
  2104. ((imm) & (1 << 5)) ? 0xFFFF : 0x0000, \
  2105. ((imm) & (1 << 6)) ? 0xFFFF : 0x0000, \
  2106. ((imm) & (1 << 7)) ? 0xFFFF : 0x0000}; \
  2107. uint16x8_t _mask_vec = vld1q_u16(_mask); \
  2108. uint16x8_t _a = vreinterpretq_u16_m128i(a); \
  2109. uint16x8_t _b = vreinterpretq_u16_m128i(b); \
  2110. vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, _b, _a)); \
  2111. })
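// Usage sketch (illustrative only): bit j of imm8 selects 16-bit lane j from b,
// otherwise the lane comes from a, so 0x0F takes the low four lanes from b and
// the high four from a:
//   __m128i r = _mm_blend_epi16(a, b, 0x0F);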
  2112. // Blend packed 8-bit integers from a and b using mask, and store the results in
  2113. // dst.
  2114. //
  2115. // FOR j := 0 to 15
  2116. // i := j*8
  2117. // IF mask[i+7]
  2118. // dst[i+7:i] := b[i+7:i]
  2119. // ELSE
  2120. // dst[i+7:i] := a[i+7:i]
  2121. // FI
  2122. // ENDFOR
  2123. FORCE_INLINE __m128i _mm_blendv_epi8(__m128i _a, __m128i _b, __m128i _mask)
  2124. {
  2125. // Use a signed shift right to create a mask with the sign bit
  2126. uint8x16_t mask =
  2127. vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_m128i(_mask), 7));
  2128. uint8x16_t a = vreinterpretq_u8_m128i(_a);
  2129. uint8x16_t b = vreinterpretq_u8_m128i(_b);
  2130. return vreinterpretq_m128i_u8(vbslq_u8(mask, b, a));
  2131. }
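// Usage sketch (illustrative only): only the sign bit of each mask byte is
// consulted, so a comparison result can be used directly (assuming the
// _mm_cmpgt_epi8 helper defined elsewhere in this file):
//   __m128i gt = _mm_cmpgt_epi8(a, b);      // 0xFF where a > b, else 0x00
//   __m128i mx = _mm_blendv_epi8(b, a, gt); // per-byte signed max of a and b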
  2132. /* Shifts */
  2133. // Shift packed 16-bit integers in a right by imm while shifting in sign
  2134. // bits, and store the results in dst.
  2135. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi16
  2136. FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int imm)
  2137. {
  2138. const int count = (imm & ~15) ? 15 : imm;
  2139. return (__m128i) vshlq_s16((int16x8_t) a, vdupq_n_s16(-count));
  2140. }
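// Usage sketch (illustrative only): the shift is arithmetic, so the sign bit is
// replicated (assuming the _mm_set1_epi16 helper defined elsewhere in this
// file):
//   __m128i v = _mm_set1_epi16(-32768); // 0x8000 in every lane
//   __m128i r = _mm_srai_epi16(v, 15);  // every lane becomes 0xFFFF, i.e. -1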
  2141. // Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while
  2142. // shifting in zeros.
  2143. //
  2144. // r0 := a0 << count
  2145. // r1 := a1 << count
  2146. // ...
  2147. // r7 := a7 << count
  2148. //
  2149. // https://msdn.microsoft.com/en-us/library/es73bcsy(v=vs.90).aspx
  2150. #define _mm_slli_epi16(a, imm) \
  2151. __extension__({ \
  2152. __m128i ret; \
if (unlikely((imm) <= 0)) { \
  2154. ret = a; \
  2155. } \
  2156. if (unlikely((imm) > 15)) { \
  2157. ret = _mm_setzero_si128(); \
  2158. } else { \
  2159. ret = vreinterpretq_m128i_s16( \
  2160. vshlq_n_s16(vreinterpretq_s16_m128i(a), (imm))); \
  2161. } \
  2162. ret; \
  2163. })
  2164. // Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while
// shifting in zeros.
  2166. // https://msdn.microsoft.com/en-us/library/z2k3bbtb%28v=vs.90%29.aspx
  2167. // FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, __constrange(0,255) int imm)
  2168. FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, int imm)
  2169. {
  2170. if (unlikely(imm <= 0)) /* TODO: add constant range macro: [0, 255] */
  2171. return a;
  2172. if (unlikely(imm > 31))
  2173. return _mm_setzero_si128();
  2174. return vreinterpretq_m128i_s32(
  2175. vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(imm)));
  2176. }
  2177. // Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and
  2178. // store the results in dst.
  2179. FORCE_INLINE __m128i _mm_slli_epi64(__m128i a, int imm)
  2180. {
  2181. if (unlikely(imm <= 0)) /* TODO: add constant range macro: [0, 255] */
  2182. return a;
  2183. if (unlikely(imm > 63))
  2184. return _mm_setzero_si128();
  2185. return vreinterpretq_m128i_s64(
  2186. vshlq_s64(vreinterpretq_s64_m128i(a), vdupq_n_s64(imm)));
  2187. }
  2188. // Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and
  2189. // store the results in dst.
  2190. //
  2191. // FOR j := 0 to 7
  2192. // i := j*16
  2193. // IF imm8[7:0] > 15
  2194. // dst[i+15:i] := 0
  2195. // ELSE
  2196. // dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0])
  2197. // FI
  2198. // ENDFOR
  2199. //
  2200. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi16
  2201. #define _mm_srli_epi16(a, imm) \
  2202. __extension__({ \
  2203. __m128i ret; \
if (unlikely((imm) == 0)) { \
ret = a; \
} else if (likely(0 < (imm) && (imm) < 16)) { \
  2208. ret = vreinterpretq_m128i_u16( \
  2209. vshlq_u16(vreinterpretq_u16_m128i(a), vdupq_n_s16(-imm))); \
  2210. } else { \
  2211. ret = _mm_setzero_si128(); \
  2212. } \
  2213. ret; \
  2214. })
  2215. // Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and
  2216. // store the results in dst.
  2217. //
  2218. // FOR j := 0 to 3
  2219. // i := j*32
  2220. // IF imm8[7:0] > 31
  2221. // dst[i+31:i] := 0
  2222. // ELSE
  2223. // dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0])
  2224. // FI
  2225. // ENDFOR
  2226. //
  2227. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi32
  2228. // FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm)
  2229. #define _mm_srli_epi32(a, imm) \
  2230. __extension__({ \
  2231. __m128i ret; \
  2232. if (unlikely((imm) == 0)) { \
  2233. ret = a; \
} else if (likely(0 < (imm) && (imm) < 32)) { \
  2236. ret = vreinterpretq_m128i_u32( \
  2237. vshlq_u32(vreinterpretq_u32_m128i(a), vdupq_n_s32(-imm))); \
  2238. } else { \
  2239. ret = _mm_setzero_si128(); \
  2240. } \
  2241. ret; \
  2242. })
  2243. // Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and
  2244. // store the results in dst.
  2245. //
  2246. // FOR j := 0 to 1
  2247. // i := j*64
  2248. // IF imm8[7:0] > 63
  2249. // dst[i+63:i] := 0
  2250. // ELSE
  2251. // dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0])
  2252. // FI
  2253. // ENDFOR
  2254. //
  2255. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi64
  2256. #define _mm_srli_epi64(a, imm) \
  2257. __extension__({ \
  2258. __m128i ret; \
  2259. if (unlikely((imm) == 0)) { \
  2260. ret = a; \
} else if (likely(0 < (imm) && (imm) < 64)) { \
  2263. ret = vreinterpretq_m128i_u64( \
  2264. vshlq_u64(vreinterpretq_u64_m128i(a), vdupq_n_s64(-imm))); \
  2265. } else { \
  2266. ret = _mm_setzero_si128(); \
  2267. } \
  2268. ret; \
  2269. })
  2270. // Shift packed 32-bit integers in a right by imm8 while shifting in sign bits,
  2271. // and store the results in dst.
  2272. //
  2273. // FOR j := 0 to 3
  2274. // i := j*32
  2275. // IF imm8[7:0] > 31
  2276. // dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
  2277. // ELSE
  2278. // dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0])
  2279. // FI
  2280. // ENDFOR
  2281. //
  2282. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi32
  2283. // FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm)
  2284. #define _mm_srai_epi32(a, imm) \
  2285. __extension__({ \
  2286. __m128i ret; \
  2287. if (unlikely((imm) == 0)) { \
  2288. ret = a; \
} else if (likely(0 < (imm) && (imm) < 32)) { \
  2291. ret = vreinterpretq_m128i_s32( \
  2292. vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(-imm))); \
  2293. } else { \
  2294. ret = vreinterpretq_m128i_s32( \
  2295. vshrq_n_s32(vreinterpretq_s32_m128i(a), 31)); \
  2296. } \
  2297. ret; \
  2298. })
// Shifts the 128-bit value in a right by imm bytes while shifting in zeros.
// imm must be an immediate.
  2301. //
  2302. // r := srl(a, imm*8)
  2303. //
  2304. // https://msdn.microsoft.com/en-us/library/305w28yz(v=vs.100).aspx
  2305. // FORCE_INLINE _mm_srli_si128(__m128i a, __constrange(0,255) int imm)
  2306. #define _mm_srli_si128(a, imm) \
  2307. __extension__({ \
  2308. __m128i ret; \
  2309. if (unlikely((imm) <= 0)) { \
  2310. ret = a; \
  2311. } \
  2312. if (unlikely((imm) > 15)) { \
  2313. ret = _mm_setzero_si128(); \
  2314. } else { \
  2315. ret = vreinterpretq_m128i_s8( \
  2316. vextq_s8(vreinterpretq_s8_m128i(a), vdupq_n_s8(0), (imm))); \
  2317. } \
  2318. ret; \
  2319. })
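// Usage sketch (illustrative only): the shift distance is in bytes, not bits,
// so shifting by 4 moves the vector down by one 32-bit lane (assuming the
// _mm_set_epi32 helper defined elsewhere in this file):
//   __m128i v = _mm_set_epi32(3, 2, 1, 0);
//   __m128i r = _mm_srli_si128(v, 4); // r[0]=1, r[1]=2, r[2]=3, r[3]=0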
  2320. // Shifts the 128-bit value in a left by imm bytes while shifting in zeros. imm
  2321. // must be an immediate.
  2322. //
  2323. // r := a << (imm * 8)
  2324. //
  2325. // https://msdn.microsoft.com/en-us/library/34d3k2kt(v=vs.100).aspx
  2326. // FORCE_INLINE __m128i _mm_slli_si128(__m128i a, __constrange(0,255) int imm)
  2327. #define _mm_slli_si128(a, imm) \
  2328. __extension__({ \
  2329. __m128i ret; \
  2330. if (unlikely((imm) <= 0)) { \
  2331. ret = a; \
  2332. } \
  2333. if (unlikely((imm) > 15)) { \
  2334. ret = _mm_setzero_si128(); \
  2335. } else { \
  2336. ret = vreinterpretq_m128i_s8(vextq_s8( \
  2337. vdupq_n_s8(0), vreinterpretq_s8_m128i(a), 16 - (imm))); \
  2338. } \
  2339. ret; \
  2340. })
  2341. // Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while
  2342. // shifting in zeros.
  2343. //
  2344. // r0 := a0 << count
  2345. // r1 := a1 << count
  2346. // ...
  2347. // r7 := a7 << count
  2348. //
  2349. // https://msdn.microsoft.com/en-us/library/c79w388h(v%3dvs.90).aspx
  2350. FORCE_INLINE __m128i _mm_sll_epi16(__m128i a, __m128i count)
  2351. {
  2352. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  2353. if (unlikely(c > 15))
  2354. return _mm_setzero_si128();
  2355. int16x8_t vc = vdupq_n_s16((int16_t) c);
  2356. return vreinterpretq_m128i_s16(vshlq_s16(vreinterpretq_s16_m128i(a), vc));
  2357. }
  2358. // Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while
  2359. // shifting in zeros.
  2360. //
  2361. // r0 := a0 << count
  2362. // r1 := a1 << count
  2363. // r2 := a2 << count
  2364. // r3 := a3 << count
  2365. //
  2366. // https://msdn.microsoft.com/en-us/library/6fe5a6s9(v%3dvs.90).aspx
  2367. FORCE_INLINE __m128i _mm_sll_epi32(__m128i a, __m128i count)
  2368. {
  2369. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  2370. if (unlikely(c > 31))
  2371. return _mm_setzero_si128();
  2372. int32x4_t vc = vdupq_n_s32((int32_t) c);
  2373. return vreinterpretq_m128i_s32(vshlq_s32(vreinterpretq_s32_m128i(a), vc));
  2374. }
  2375. // Shifts the 2 signed or unsigned 64-bit integers in a left by count bits while
  2376. // shifting in zeros.
  2377. //
  2378. // r0 := a0 << count
  2379. // r1 := a1 << count
  2380. //
  2381. // https://msdn.microsoft.com/en-us/library/6ta9dffd(v%3dvs.90).aspx
  2382. FORCE_INLINE __m128i _mm_sll_epi64(__m128i a, __m128i count)
  2383. {
  2384. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  2385. if (unlikely(c > 63))
  2386. return _mm_setzero_si128();
  2387. int64x2_t vc = vdupq_n_s64((int64_t) c);
  2388. return vreinterpretq_m128i_s64(vshlq_s64(vreinterpretq_s64_m128i(a), vc));
  2389. }
  2390. // Shifts the 8 signed or unsigned 16-bit integers in a right by count bits
  2391. // while shifting in zeros.
  2392. //
  2393. // r0 := srl(a0, count)
  2394. // r1 := srl(a1, count)
  2395. // ...
  2396. // r7 := srl(a7, count)
  2397. //
  2398. // https://msdn.microsoft.com/en-us/library/wd5ax830(v%3dvs.90).aspx
  2399. FORCE_INLINE __m128i _mm_srl_epi16(__m128i a, __m128i count)
  2400. {
  2401. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  2402. if (unlikely(c > 15))
  2403. return _mm_setzero_si128();
  2404. int16x8_t vc = vdupq_n_s16(-(int16_t) c);
  2405. return vreinterpretq_m128i_u16(vshlq_u16(vreinterpretq_u16_m128i(a), vc));
  2406. }
  2407. // Shifts the 4 signed or unsigned 32-bit integers in a right by count bits
  2408. // while shifting in zeros.
  2409. //
  2410. // r0 := srl(a0, count)
  2411. // r1 := srl(a1, count)
  2412. // r2 := srl(a2, count)
  2413. // r3 := srl(a3, count)
  2414. //
  2415. // https://msdn.microsoft.com/en-us/library/a9cbttf4(v%3dvs.90).aspx
  2416. FORCE_INLINE __m128i _mm_srl_epi32(__m128i a, __m128i count)
  2417. {
  2418. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  2419. if (unlikely(c > 31))
  2420. return _mm_setzero_si128();
  2421. int32x4_t vc = vdupq_n_s32(-(int32_t) c);
  2422. return vreinterpretq_m128i_u32(vshlq_u32(vreinterpretq_u32_m128i(a), vc));
  2423. }
  2424. // Shifts the 2 signed or unsigned 64-bit integers in a right by count bits
  2425. // while shifting in zeros.
  2426. //
  2427. // r0 := srl(a0, count)
  2428. // r1 := srl(a1, count)
  2429. //
  2430. // https://msdn.microsoft.com/en-us/library/yf6cf9k8(v%3dvs.90).aspx
  2431. FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count)
  2432. {
  2433. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  2434. if (unlikely(c > 63))
  2435. return _mm_setzero_si128();
  2436. int64x2_t vc = vdupq_n_s64(-(int64_t) c);
  2437. return vreinterpretq_m128i_u64(vshlq_u64(vreinterpretq_u64_m128i(a), vc));
  2438. }
  2439. // NEON does not provide a version of this function.
  2440. // Creates a 16-bit mask from the most significant bits of the 16 signed or
  2441. // unsigned 8-bit integers in a and zero extends the upper bits.
  2442. // https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx
  2443. FORCE_INLINE int _mm_movemask_epi8(__m128i a)
  2444. {
  2445. // Use increasingly wide shifts+adds to collect the sign bits
  2446. // together.
  2447. // Since the widening shifts would be rather confusing to follow in little
  2448. // endian, everything will be illustrated in big endian order instead. This
  2449. // has a different result - the bits would actually be reversed on a big
  2450. // endian machine.
  2451. // Starting input (only half the elements are shown):
  2452. // 89 ff 1d c0 00 10 99 33
  2453. uint8x16_t input = vreinterpretq_u8_m128i(a);
  2454. // Shift out everything but the sign bits with an unsigned shift right.
  2455. //
// Bytes of the vector:
  2457. // 89 ff 1d c0 00 10 99 33
  2458. // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7)
  2459. // | | | | | | | |
  2460. // 01 01 00 01 00 00 01 00
  2461. //
  2462. // Bits of first important lane(s):
  2463. // 10001001 (89)
  2464. // \______
  2465. // |
  2466. // 00000001 (01)
  2467. uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));
  2468. // Merge the even lanes together with a 16-bit unsigned shift right + add.
  2469. // 'xx' represents garbage data which will be ignored in the final result.
  2470. // In the important bytes, the add functions like a binary OR.
  2471. //
  2472. // 01 01 00 01 00 00 01 00
  2473. // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7))
  2474. // \| \| \| \|
  2475. // xx 03 xx 01 xx 00 xx 02
  2476. //
  2477. // 00000001 00000001 (01 01)
  2478. // \_______ |
  2479. // \|
  2480. // xxxxxxxx xxxxxx11 (xx 03)
  2481. uint32x4_t paired16 =
  2482. vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
  2483. // Repeat with a wider 32-bit shift + add.
  2484. // xx 03 xx 01 xx 00 xx 02
  2485. // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >>
  2486. // 14))
  2487. // \| \|
  2488. // xx xx xx 0d xx xx xx 02
  2489. //
  2490. // 00000011 00000001 (03 01)
  2491. // \\_____ ||
  2492. // '----.\||
  2493. // xxxxxxxx xxxx1101 (xx 0d)
  2494. uint64x2_t paired32 =
  2495. vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
  2496. // Last, an even wider 64-bit shift + add to get our result in the low 8 bit
  2497. // lanes. xx xx xx 0d xx xx xx 02
  2498. // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >>
  2499. // 28))
  2500. // \|
  2501. // xx xx xx xx xx xx xx d2
  2502. //
  2503. // 00001101 00000010 (0d 02)
  2504. // \ \___ | |
  2505. // '---. \| |
  2506. // xxxxxxxx 11010010 (xx d2)
  2507. uint8x16_t paired64 =
  2508. vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
  2509. // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts.
  2510. // xx xx xx xx xx xx xx d2
  2511. // || return paired64[0]
  2512. // d2
  2513. // Note: Little endian would return the correct value 4b (01001011) instead.
  2514. return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8);
  2515. }
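// Usage sketch (illustrative only): combined with a byte compare this yields a
// 16-bit hit mask, a common pattern for byte searches (assuming the
// _mm_cmpeq_epi8 and _mm_set1_epi8 helpers defined elsewhere in this file):
//   __m128i eq   = _mm_cmpeq_epi8(chunk, _mm_set1_epi8('\n'));
//   int     hits = _mm_movemask_epi8(eq); // bit i set <=> byte i of chunk == '\n'
//   // if (hits) the first match is at __builtin_ctz(hits)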
  2516. // Copy the lower 64-bit integer in a to dst.
  2517. //
  2518. // dst[63:0] := a[63:0]
  2519. //
  2520. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movepi64_pi64
  2521. FORCE_INLINE __m64 _mm_movepi64_pi64(__m128i a)
  2522. {
  2523. return vreinterpret_m64_s64(vget_low_s64(vreinterpretq_s64_m128i(a)));
  2524. }
  2525. // Copy the 64-bit integer a to the lower element of dst, and zero the upper
  2526. // element.
  2527. //
  2528. // dst[63:0] := a[63:0]
  2529. // dst[127:64] := 0
  2530. //
  2531. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movpi64_epi64
  2532. FORCE_INLINE __m128i _mm_movpi64_epi64(__m64 a)
  2533. {
  2534. return vreinterpretq_m128i_s64(
  2535. vcombine_s64(vreinterpret_s64_m64(a), vdup_n_s64(0)));
  2536. }
  2537. // NEON does not provide this method
  2538. // Creates a 4-bit mask from the most significant bits of the four
  2539. // single-precision, floating-point values.
  2540. // https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx
  2541. FORCE_INLINE int _mm_movemask_ps(__m128 a)
  2542. {
  2543. uint32x4_t input = vreinterpretq_u32_m128(a);
  2544. #if defined(__aarch64__)
  2545. static const int32x4_t shift = {0, 1, 2, 3};
  2546. uint32x4_t tmp = vshrq_n_u32(input, 31);
  2547. return vaddvq_u32(vshlq_u32(tmp, shift));
  2548. #else
  2549. // Uses the exact same method as _mm_movemask_epi8, see that for details.
  2550. // Shift out everything but the sign bits with a 32-bit unsigned shift
  2551. // right.
  2552. uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(input, 31));
  2553. // Merge the two pairs together with a 64-bit unsigned shift right + add.
  2554. uint8x16_t paired =
  2555. vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
  2556. // Extract the result.
  2557. return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
  2558. #endif
  2559. }
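// Usage sketch (illustrative only): bit i of the result is the sign bit of
// float lane i, so 0xF means a comparison held in every lane (assuming the
// _mm_cmplt_ps helper defined elsewhere in this file):
//   int all_less = (_mm_movemask_ps(_mm_cmplt_ps(a, b)) == 0xF);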
  2560. // Compute the bitwise NOT of a and then AND with a 128-bit vector containing
  2561. // all 1's, and return 1 if the result is zero, otherwise return 0.
  2562. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_ones
  2563. FORCE_INLINE int _mm_test_all_ones(__m128i a)
  2564. {
  2565. return (uint64_t)(vgetq_lane_s64(a, 0) & vgetq_lane_s64(a, 1)) ==
  2566. ~(uint64_t) 0;
  2567. }
  2568. // Compute the bitwise AND of 128 bits (representing integer data) in a and
  2569. // mask, and return 1 if the result is zero, otherwise return 0.
  2570. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_zeros
  2571. FORCE_INLINE int _mm_test_all_zeros(__m128i a, __m128i mask)
  2572. {
  2573. int64x2_t a_and_mask =
  2574. vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(mask));
  2575. return (vgetq_lane_s64(a_and_mask, 0) | vgetq_lane_s64(a_and_mask, 1)) ? 0
  2576. : 1;
  2577. }
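// Usage sketch (illustrative only):
//   int disjoint = _mm_test_all_zeros(flags_a, flags_b); // 1 iff (flags_a & flags_b) == 0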
  2578. /* Math operations */
  2579. // Subtracts the four single-precision, floating-point values of a and b.
  2580. //
  2581. // r0 := a0 - b0
  2582. // r1 := a1 - b1
  2583. // r2 := a2 - b2
  2584. // r3 := a3 - b3
  2585. //
  2586. // https://msdn.microsoft.com/en-us/library/vstudio/1zad2k61(v=vs.100).aspx
  2587. FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
  2588. {
  2589. return vreinterpretq_m128_f32(
  2590. vsubq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  2591. }
  2592. // Subtract the lower single-precision (32-bit) floating-point element in b from
  2593. // the lower single-precision (32-bit) floating-point element in a, store the
  2594. // result in the lower element of dst, and copy the upper 3 packed elements from
  2595. // a to the upper elements of dst.
  2596. //
  2597. // dst[31:0] := a[31:0] - b[31:0]
  2598. // dst[127:32] := a[127:32]
  2599. //
  2600. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_ss
  2601. FORCE_INLINE __m128 _mm_sub_ss(__m128 a, __m128 b)
  2602. {
  2603. return _mm_move_ss(a, _mm_sub_ps(a, b));
  2604. }
  2605. // Subtract 2 packed 64-bit integers in b from 2 packed 64-bit integers in a,
  2606. // and store the results in dst.
  2607. // r0 := a0 - b0
  2608. // r1 := a1 - b1
  2609. FORCE_INLINE __m128i _mm_sub_epi64(__m128i a, __m128i b)
  2610. {
  2611. return vreinterpretq_m128i_s64(
  2612. vsubq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
  2613. }
  2614. // Subtracts the 4 signed or unsigned 32-bit integers of b from the 4 signed or
  2615. // unsigned 32-bit integers of a.
  2616. //
  2617. // r0 := a0 - b0
  2618. // r1 := a1 - b1
  2619. // r2 := a2 - b2
  2620. // r3 := a3 - b3
  2621. //
  2622. // https://msdn.microsoft.com/en-us/library/vstudio/fhh866h0(v=vs.100).aspx
  2623. FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
  2624. {
  2625. return vreinterpretq_m128i_s32(
  2626. vsubq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  2627. }
  2628. // Subtract packed 16-bit integers in b from packed 16-bit integers in a, and
  2629. // store the results in dst.
  2630. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi16
  2631. FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b)
  2632. {
  2633. return vreinterpretq_m128i_s16(
  2634. vsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  2635. }
  2636. // Subtract packed 8-bit integers in b from packed 8-bit integers in a, and
  2637. // store the results in dst.
  2638. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi8
  2639. FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b)
  2640. {
  2641. return vreinterpretq_m128i_s8(
  2642. vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  2643. }
  2644. // Subtract 64-bit integer b from 64-bit integer a, and store the result in dst.
  2645. //
  2646. // dst[63:0] := a[63:0] - b[63:0]
  2647. //
  2648. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_si64
  2649. FORCE_INLINE __m64 _mm_sub_si64(__m64 a, __m64 b)
  2650. {
  2651. return vreinterpret_m64_s64(
  2652. vsub_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
  2653. }
// Subtracts the 8 unsigned 16-bit integers of b from the 8 unsigned 16-bit
// integers of a and saturates.
  2656. // https://technet.microsoft.com/en-us/subscriptions/index/f44y0s19(v=vs.90).aspx
  2657. FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b)
  2658. {
  2659. return vreinterpretq_m128i_u16(
  2660. vqsubq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
  2661. }
  2662. // Subtracts the 16 unsigned 8-bit integers of b from the 16 unsigned 8-bit
  2663. // integers of a and saturates.
  2664. //
  2665. // r0 := UnsignedSaturate(a0 - b0)
  2666. // r1 := UnsignedSaturate(a1 - b1)
  2667. // ...
  2668. // r15 := UnsignedSaturate(a15 - b15)
  2669. //
  2670. // https://technet.microsoft.com/en-us/subscriptions/yadkxc18(v=vs.90)
  2671. FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b)
  2672. {
  2673. return vreinterpretq_m128i_u8(
  2674. vqsubq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  2675. }
  2676. // Subtracts the 16 signed 8-bit integers of b from the 16 signed 8-bit integers
  2677. // of a and saturates.
  2678. //
  2679. // r0 := SignedSaturate(a0 - b0)
  2680. // r1 := SignedSaturate(a1 - b1)
  2681. // ...
  2682. // r15 := SignedSaturate(a15 - b15)
  2683. //
  2684. // https://technet.microsoft.com/en-us/subscriptions/by7kzks1(v=vs.90)
  2685. FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b)
  2686. {
  2687. return vreinterpretq_m128i_s8(
  2688. vqsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  2689. }
  2690. // Subtracts the 8 signed 16-bit integers of b from the 8 signed 16-bit integers
  2691. // of a and saturates.
  2692. //
  2693. // r0 := SignedSaturate(a0 - b0)
  2694. // r1 := SignedSaturate(a1 - b1)
  2695. // ...
  2696. // r7 := SignedSaturate(a7 - b7)
  2697. //
  2698. // https://technet.microsoft.com/en-us/subscriptions/3247z5b8(v=vs.90)
  2699. FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b)
  2700. {
  2701. return vreinterpretq_m128i_s16(
  2702. vqsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  2703. }
  2704. // Subtract packed double-precision (64-bit) floating-point elements in b from
  2705. // packed double-precision (64-bit) floating-point elements in a, and store the
  2706. // results in dst.
  2707. //
  2708. // FOR j := 0 to 1
  2709. // i := j*64
  2710. // dst[i+63:i] := a[i+63:i] - b[i+63:i]
  2711. // ENDFOR
  2712. //
  2713. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_sub_pd
  2714. FORCE_INLINE __m128d _mm_sub_pd(__m128d a, __m128d b)
  2715. {
  2716. #if defined(__aarch64__)
  2717. return vreinterpretq_m128d_f64(
  2718. vsubq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  2719. #else
  2720. double *da = (double *) &a;
  2721. double *db = (double *) &b;
  2722. double c[2];
  2723. c[0] = da[0] - db[0];
  2724. c[1] = da[1] - db[1];
  2725. return vld1q_f32((float32_t *) c);
  2726. #endif
  2727. }
  2728. // Subtract the lower double-precision (64-bit) floating-point element in b from
  2729. // the lower double-precision (64-bit) floating-point element in a, store the
  2730. // result in the lower element of dst, and copy the upper element from a to the
  2731. // upper element of dst.
  2732. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_sd
  2733. FORCE_INLINE __m128d _mm_sub_sd(__m128d a, __m128d b)
  2734. {
  2735. return _mm_move_sd(a, _mm_sub_pd(a, b));
  2736. }
  2737. // Add packed unsigned 16-bit integers in a and b using saturation, and store
  2738. // the results in dst.
  2739. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epu16
  2740. FORCE_INLINE __m128i _mm_adds_epu16(__m128i a, __m128i b)
  2741. {
  2742. return vreinterpretq_m128i_u16(
  2743. vqaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
  2744. }
  2745. // Negate packed 8-bit integers in a when the corresponding signed
  2746. // 8-bit integer in b is negative, and store the results in dst.
// Elements in dst are zeroed out when the corresponding element
  2748. // in b is zero.
  2749. //
  2750. // for i in 0..15
  2751. // if b[i] < 0
  2752. // r[i] := -a[i]
  2753. // else if b[i] == 0
  2754. // r[i] := 0
  2755. // else
  2756. // r[i] := a[i]
  2757. // fi
  2758. // done
  2759. FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b)
  2760. {
  2761. int8x16_t a = vreinterpretq_s8_m128i(_a);
  2762. int8x16_t b = vreinterpretq_s8_m128i(_b);
  2763. // signed shift right: faster than vclt
  2764. // (b < 0) ? 0xFF : 0
  2765. uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7));
  2766. // (b == 0) ? 0xFF : 0
  2767. #if defined(__aarch64__)
  2768. int8x16_t zeroMask = vreinterpretq_s8_u8(vceqzq_s8(b));
  2769. #else
  2770. int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, vdupq_n_s8(0)));
  2771. #endif
// bitwise select either a or negative 'a' (vnegq_s8(a) returns negative 'a')
  2773. // based on ltMask
  2774. int8x16_t masked = vbslq_s8(ltMask, vnegq_s8(a), a);
  2775. // res = masked & (~zeroMask)
  2776. int8x16_t res = vbicq_s8(masked, zeroMask);
  2777. return vreinterpretq_m128i_s8(res);
  2778. }
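// Usage sketch (illustrative only): passing the same vector twice gives a
// per-byte absolute value, with the usual two's-complement caveat that -128
// stays -128:
//   __m128i absa = _mm_sign_epi8(a, a); // -a where a < 0, 0 where a == 0, else a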
  2779. // Negate packed 16-bit integers in a when the corresponding signed
  2780. // 16-bit integer in b is negative, and store the results in dst.
// Elements in dst are zeroed out when the corresponding element
  2782. // in b is zero.
  2783. //
  2784. // for i in 0..7
  2785. // if b[i] < 0
  2786. // r[i] := -a[i]
  2787. // else if b[i] == 0
  2788. // r[i] := 0
  2789. // else
  2790. // r[i] := a[i]
  2791. // fi
  2792. // done
  2793. FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b)
  2794. {
  2795. int16x8_t a = vreinterpretq_s16_m128i(_a);
  2796. int16x8_t b = vreinterpretq_s16_m128i(_b);
  2797. // signed shift right: faster than vclt
  2798. // (b < 0) ? 0xFFFF : 0
  2799. uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15));
  2800. // (b == 0) ? 0xFFFF : 0
  2801. #if defined(__aarch64__)
  2802. int16x8_t zeroMask = vreinterpretq_s16_u16(vceqzq_s16(b));
  2803. #else
  2804. int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, vdupq_n_s16(0)));
  2805. #endif
// bitwise select either a or negative 'a' (vnegq_s16(a) returns negative
// 'a') based on ltMask
  2808. int16x8_t masked = vbslq_s16(ltMask, vnegq_s16(a), a);
  2809. // res = masked & (~zeroMask)
  2810. int16x8_t res = vbicq_s16(masked, zeroMask);
  2811. return vreinterpretq_m128i_s16(res);
  2812. }
  2813. // Negate packed 32-bit integers in a when the corresponding signed
  2814. // 32-bit integer in b is negative, and store the results in dst.
// Elements in dst are zeroed out when the corresponding element
  2816. // in b is zero.
  2817. //
  2818. // for i in 0..3
  2819. // if b[i] < 0
  2820. // r[i] := -a[i]
  2821. // else if b[i] == 0
  2822. // r[i] := 0
  2823. // else
  2824. // r[i] := a[i]
  2825. // fi
  2826. // done
  2827. FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b)
  2828. {
  2829. int32x4_t a = vreinterpretq_s32_m128i(_a);
  2830. int32x4_t b = vreinterpretq_s32_m128i(_b);
  2831. // signed shift right: faster than vclt
  2832. // (b < 0) ? 0xFFFFFFFF : 0
  2833. uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31));
  2834. // (b == 0) ? 0xFFFFFFFF : 0
  2835. #if defined(__aarch64__)
  2836. int32x4_t zeroMask = vreinterpretq_s32_u32(vceqzq_s32(b));
  2837. #else
  2838. int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, vdupq_n_s32(0)));
  2839. #endif
// bitwise select either a or negative 'a' (vnegq_s32(a) returns negative
// 'a') based on ltMask
  2842. int32x4_t masked = vbslq_s32(ltMask, vnegq_s32(a), a);
  2843. // res = masked & (~zeroMask)
  2844. int32x4_t res = vbicq_s32(masked, zeroMask);
  2845. return vreinterpretq_m128i_s32(res);
  2846. }
  2847. // Negate packed 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative, and store the results in dst. Elements in dst are
  2849. // zeroed out when the corresponding element in b is zero.
  2850. //
  2851. // FOR j := 0 to 3
  2852. // i := j*16
  2853. // IF b[i+15:i] < 0
  2854. // dst[i+15:i] := -(a[i+15:i])
  2855. // ELSE IF b[i+15:i] == 0
  2856. // dst[i+15:i] := 0
  2857. // ELSE
  2858. // dst[i+15:i] := a[i+15:i]
  2859. // FI
  2860. // ENDFOR
  2861. //
  2862. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi16
  2863. FORCE_INLINE __m64 _mm_sign_pi16(__m64 _a, __m64 _b)
  2864. {
  2865. int16x4_t a = vreinterpret_s16_m64(_a);
  2866. int16x4_t b = vreinterpret_s16_m64(_b);
  2867. // signed shift right: faster than vclt
  2868. // (b < 0) ? 0xFFFF : 0
  2869. uint16x4_t ltMask = vreinterpret_u16_s16(vshr_n_s16(b, 15));
  2870. // (b == 0) ? 0xFFFF : 0
  2871. #if defined(__aarch64__)
  2872. int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b));
  2873. #else
  2874. int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0)));
  2875. #endif
// bitwise select either a or negative 'a' (vneg_s16(a) returns negative 'a')
  2877. // based on ltMask
  2878. int16x4_t masked = vbsl_s16(ltMask, vneg_s16(a), a);
  2879. // res = masked & (~zeroMask)
  2880. int16x4_t res = vbic_s16(masked, zeroMask);
  2881. return vreinterpret_m64_s16(res);
  2882. }
  2883. // Negate packed 32-bit integers in a when the corresponding signed 32-bit
// integer in b is negative, and store the results in dst. Elements in dst are
  2885. // zeroed out when the corresponding element in b is zero.
  2886. //
  2887. // FOR j := 0 to 1
  2888. // i := j*32
  2889. // IF b[i+31:i] < 0
  2890. // dst[i+31:i] := -(a[i+31:i])
  2891. // ELSE IF b[i+31:i] == 0
  2892. // dst[i+31:i] := 0
  2893. // ELSE
  2894. // dst[i+31:i] := a[i+31:i]
  2895. // FI
  2896. // ENDFOR
  2897. //
  2898. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi32
  2899. FORCE_INLINE __m64 _mm_sign_pi32(__m64 _a, __m64 _b)
  2900. {
  2901. int32x2_t a = vreinterpret_s32_m64(_a);
  2902. int32x2_t b = vreinterpret_s32_m64(_b);
  2903. // signed shift right: faster than vclt
  2904. // (b < 0) ? 0xFFFFFFFF : 0
  2905. uint32x2_t ltMask = vreinterpret_u32_s32(vshr_n_s32(b, 31));
  2906. // (b == 0) ? 0xFFFFFFFF : 0
  2907. #if defined(__aarch64__)
  2908. int32x2_t zeroMask = vreinterpret_s32_u32(vceqz_s32(b));
  2909. #else
  2910. int32x2_t zeroMask = vreinterpret_s32_u32(vceq_s32(b, vdup_n_s32(0)));
  2911. #endif
// bitwise select either a or negative 'a' (vneg_s32(a) returns negative 'a')
  2913. // based on ltMask
  2914. int32x2_t masked = vbsl_s32(ltMask, vneg_s32(a), a);
  2915. // res = masked & (~zeroMask)
  2916. int32x2_t res = vbic_s32(masked, zeroMask);
  2917. return vreinterpret_m64_s32(res);
  2918. }
  2919. // Negate packed 8-bit integers in a when the corresponding signed 8-bit integer
// in b is negative, and store the results in dst. Elements in dst are zeroed out
  2921. // when the corresponding element in b is zero.
  2922. //
  2923. // FOR j := 0 to 7
  2924. // i := j*8
  2925. // IF b[i+7:i] < 0
  2926. // dst[i+7:i] := -(a[i+7:i])
  2927. // ELSE IF b[i+7:i] == 0
  2928. // dst[i+7:i] := 0
  2929. // ELSE
  2930. // dst[i+7:i] := a[i+7:i]
  2931. // FI
  2932. // ENDFOR
  2933. //
  2934. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi8
  2935. FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b)
  2936. {
  2937. int8x8_t a = vreinterpret_s8_m64(_a);
  2938. int8x8_t b = vreinterpret_s8_m64(_b);
  2939. // signed shift right: faster than vclt
  2940. // (b < 0) ? 0xFF : 0
  2941. uint8x8_t ltMask = vreinterpret_u8_s8(vshr_n_s8(b, 7));
  2942. // (b == 0) ? 0xFF : 0
  2943. #if defined(__aarch64__)
  2944. int8x8_t zeroMask = vreinterpret_s8_u8(vceqz_s8(b));
  2945. #else
  2946. int8x8_t zeroMask = vreinterpret_s8_u8(vceq_s8(b, vdup_n_s8(0)));
  2947. #endif
// bitwise select either a or negative 'a' (vneg_s8(a) returns negative 'a')
  2949. // based on ltMask
  2950. int8x8_t masked = vbsl_s8(ltMask, vneg_s8(a), a);
  2951. // res = masked & (~zeroMask)
  2952. int8x8_t res = vbic_s8(masked, zeroMask);
  2953. return vreinterpret_m64_s8(res);
  2954. }
  2955. // Average packed unsigned 16-bit integers in a and b, and store the results in
  2956. // dst.
  2957. //
  2958. // FOR j := 0 to 3
  2959. // i := j*16
  2960. // dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
  2961. // ENDFOR
  2962. //
  2963. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_pu16
  2964. FORCE_INLINE __m64 _mm_avg_pu16(__m64 a, __m64 b)
  2965. {
  2966. return vreinterpret_m64_u16(
  2967. vrhadd_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)));
  2968. }
  2969. // Average packed unsigned 8-bit integers in a and b, and store the results in
  2970. // dst.
  2971. //
  2972. // FOR j := 0 to 7
  2973. // i := j*8
  2974. // dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
  2975. // ENDFOR
  2976. //
  2977. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_pu8
  2978. FORCE_INLINE __m64 _mm_avg_pu8(__m64 a, __m64 b)
  2979. {
  2980. return vreinterpret_m64_u8(
  2981. vrhadd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
  2982. }
  2983. // Average packed unsigned 8-bit integers in a and b, and store the results in
  2984. // dst.
  2985. //
  2986. // FOR j := 0 to 7
  2987. // i := j*8
  2988. // dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
  2989. // ENDFOR
  2990. //
  2991. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pavgb
  2992. #define _m_pavgb(a, b) _mm_avg_pu8(a, b)
  2993. // Average packed unsigned 16-bit integers in a and b, and store the results in
  2994. // dst.
  2995. //
  2996. // FOR j := 0 to 3
  2997. // i := j*16
  2998. // dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
  2999. // ENDFOR
  3000. //
  3001. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pavgw
  3002. #define _m_pavgw(a, b) _mm_avg_pu16(a, b)
  3003. // Extract a 16-bit integer from a, selected with imm8, and store the result in
  3004. // the lower element of dst.
  3005. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pextrw
  3006. #define _m_pextrw(a, imm) _mm_extract_pi16(a, imm)
  3007. // Copy a to dst, and insert the 16-bit integer i into dst at the location
  3008. // specified by imm8.
  3009. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=m_pinsrw
  3010. #define _m_pinsrw(a, i, imm) _mm_insert_pi16(a, i, imm)
  3011. // Compare packed signed 16-bit integers in a and b, and store packed maximum
  3012. // values in dst.
  3013. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmaxsw
  3014. #define _m_pmaxsw(a, b) _mm_max_pi16(a, b)
  3015. // Compare packed unsigned 8-bit integers in a and b, and store packed maximum
  3016. // values in dst.
  3017. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmaxub
  3018. #define _m_pmaxub(a, b) _mm_max_pu8(a, b)
  3019. // Compare packed signed 16-bit integers in a and b, and store packed minimum
  3020. // values in dst.
  3021. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pminsw
  3022. #define _m_pminsw(a, b) _mm_min_pi16(a, b)
  3023. // Compare packed unsigned 8-bit integers in a and b, and store packed minimum
  3024. // values in dst.
  3025. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pminub
  3026. #define _m_pminub(a, b) _mm_min_pu8(a, b)
  3027. // Computes the average of the 16 unsigned 8-bit integers in a and the 16
  3028. // unsigned 8-bit integers in b and rounds.
  3029. //
// r0 := (a0 + b0 + 1) >> 1
// r1 := (a1 + b1 + 1) >> 1
// ...
// r15 := (a15 + b15 + 1) >> 1
  3034. //
  3035. // https://msdn.microsoft.com/en-us/library/vstudio/8zwh554a(v%3dvs.90).aspx
  3036. FORCE_INLINE __m128i _mm_avg_epu8(__m128i a, __m128i b)
  3037. {
  3038. return vreinterpretq_m128i_u8(
  3039. vrhaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  3040. }
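// Usage sketch (illustrative only), assuming the _mm_set1_epi8 helper defined
// elsewhere in this file:
//   __m128i r = _mm_avg_epu8(_mm_set1_epi8(1), _mm_set1_epi8(2));
//   // every byte of r is (1 + 2 + 1) >> 1 == 2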
  3041. // Computes the average of the 8 unsigned 16-bit integers in a and the 8
  3042. // unsigned 16-bit integers in b and rounds.
  3043. //
// r0 := (a0 + b0 + 1) >> 1
// r1 := (a1 + b1 + 1) >> 1
// ...
// r7 := (a7 + b7 + 1) >> 1
  3048. //
  3049. // https://msdn.microsoft.com/en-us/library/vstudio/y13ca3c8(v=vs.90).aspx
  3050. FORCE_INLINE __m128i _mm_avg_epu16(__m128i a, __m128i b)
  3051. {
  3052. return (__m128i) vrhaddq_u16(vreinterpretq_u16_m128i(a),
  3053. vreinterpretq_u16_m128i(b));
  3054. }
  3055. // Adds the four single-precision, floating-point values of a and b.
  3056. //
  3057. // r0 := a0 + b0
  3058. // r1 := a1 + b1
  3059. // r2 := a2 + b2
  3060. // r3 := a3 + b3
  3061. //
  3062. // https://msdn.microsoft.com/en-us/library/vstudio/c9848chc(v=vs.100).aspx
  3063. FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
  3064. {
  3065. return vreinterpretq_m128_f32(
  3066. vaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  3067. }
  3068. // Add packed double-precision (64-bit) floating-point elements in a and b, and
  3069. // store the results in dst.
  3070. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_pd
  3071. FORCE_INLINE __m128d _mm_add_pd(__m128d a, __m128d b)
  3072. {
  3073. #if defined(__aarch64__)
  3074. return vreinterpretq_m128d_f64(
  3075. vaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  3076. #else
  3077. double *da = (double *) &a;
  3078. double *db = (double *) &b;
  3079. double c[2];
  3080. c[0] = da[0] + db[0];
  3081. c[1] = da[1] + db[1];
  3082. return vld1q_f32((float32_t *) c);
  3083. #endif
  3084. }
  3085. // Add the lower double-precision (64-bit) floating-point element in a and b,
  3086. // store the result in the lower element of dst, and copy the upper element from
  3087. // a to the upper element of dst.
  3088. //
  3089. // dst[63:0] := a[63:0] + b[63:0]
  3090. // dst[127:64] := a[127:64]
  3091. //
  3092. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_sd
  3093. FORCE_INLINE __m128d _mm_add_sd(__m128d a, __m128d b)
  3094. {
  3095. #if defined(__aarch64__)
  3096. return _mm_move_sd(a, _mm_add_pd(a, b));
  3097. #else
  3098. double *da = (double *) &a;
  3099. double *db = (double *) &b;
  3100. double c[2];
  3101. c[0] = da[0] + db[0];
  3102. c[1] = da[1];
  3103. return vld1q_f32((float32_t *) c);
  3104. #endif
  3105. }
  3106. // Add 64-bit integers a and b, and store the result in dst.
  3107. //
  3108. // dst[63:0] := a[63:0] + b[63:0]
  3109. //
  3110. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_si64
  3111. FORCE_INLINE __m64 _mm_add_si64(__m64 a, __m64 b)
  3112. {
  3113. return vreinterpret_m64_s64(
  3114. vadd_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
  3115. }
// Adds the scalar single-precision, floating-point values of a and b.
  3117. // https://msdn.microsoft.com/en-us/library/be94x2y6(v=vs.100).aspx
  3118. FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b)
  3119. {
  3120. float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
  3121. float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
  3122. // the upper values in the result must be the remnants of <a>.
  3123. return vreinterpretq_m128_f32(vaddq_f32(a, value));
  3124. }
// Adds the 2 signed or unsigned 64-bit integers in a to the 2 signed or
// unsigned 64-bit integers in b.
  3127. // https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
  3128. FORCE_INLINE __m128i _mm_add_epi64(__m128i a, __m128i b)
  3129. {
  3130. return vreinterpretq_m128i_s64(
  3131. vaddq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
  3132. }
  3133. // Adds the 4 signed or unsigned 32-bit integers in a to the 4 signed or
  3134. // unsigned 32-bit integers in b.
  3135. //
  3136. // r0 := a0 + b0
  3137. // r1 := a1 + b1
  3138. // r2 := a2 + b2
  3139. // r3 := a3 + b3
  3140. //
  3141. // https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
  3142. FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
  3143. {
  3144. return vreinterpretq_m128i_s32(
  3145. vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  3146. }
  3147. // Adds the 8 signed or unsigned 16-bit integers in a to the 8 signed or
  3148. // unsigned 16-bit integers in b.
  3149. // https://msdn.microsoft.com/en-us/library/fceha5k4(v=vs.100).aspx
  3150. FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
  3151. {
  3152. return vreinterpretq_m128i_s16(
  3153. vaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  3154. }
  3155. // Adds the 16 signed or unsigned 8-bit integers in a to the 16 signed or
  3156. // unsigned 8-bit integers in b.
  3157. // https://technet.microsoft.com/en-us/subscriptions/yc7tcyzs(v=vs.90)
  3158. FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b)
  3159. {
  3160. return vreinterpretq_m128i_s8(
  3161. vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  3162. }
  3163. // Adds the 8 signed 16-bit integers in a to the 8 signed 16-bit integers in b
  3164. // and saturates.
  3165. //
  3166. // r0 := SignedSaturate(a0 + b0)
  3167. // r1 := SignedSaturate(a1 + b1)
  3168. // ...
  3169. // r7 := SignedSaturate(a7 + b7)
  3170. //
  3171. // https://msdn.microsoft.com/en-us/library/1a306ef8(v=vs.100).aspx
  3172. FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b)
  3173. {
  3174. return vreinterpretq_m128i_s16(
  3175. vqaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  3176. }
  3177. // Add packed signed 8-bit integers in a and b using saturation, and store the
  3178. // results in dst.
  3179. //
  3180. // FOR j := 0 to 15
  3181. // i := j*8
  3182. // dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
  3183. // ENDFOR
  3184. //
  3185. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epi8
  3186. FORCE_INLINE __m128i _mm_adds_epi8(__m128i a, __m128i b)
  3187. {
  3188. return vreinterpretq_m128i_s8(
  3189. vqaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  3190. }
  3191. // Adds the 16 unsigned 8-bit integers in a to the 16 unsigned 8-bit integers in
// b and saturates.
  3193. // https://msdn.microsoft.com/en-us/library/9hahyddy(v=vs.100).aspx
  3194. FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b)
  3195. {
  3196. return vreinterpretq_m128i_u8(
  3197. vqaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  3198. }
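// Usage sketch (illustrative only): saturation clamps instead of wrapping
// (assuming the _mm_set1_epi8 helper defined elsewhere in this file):
//   __m128i r = _mm_adds_epu8(_mm_set1_epi8((char) 200), _mm_set1_epi8((char) 100));
//   // every byte of r is 255, not (200 + 100) & 0xFF == 44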
  3199. // Multiplies the 8 signed or unsigned 16-bit integers from a by the 8 signed or
  3200. // unsigned 16-bit integers from b.
  3201. //
  3202. // r0 := (a0 * b0)[15:0]
  3203. // r1 := (a1 * b1)[15:0]
  3204. // ...
  3205. // r7 := (a7 * b7)[15:0]
  3206. //
  3207. // https://msdn.microsoft.com/en-us/library/vstudio/9ks1472s(v=vs.100).aspx
  3208. FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
  3209. {
  3210. return vreinterpretq_m128i_s16(
  3211. vmulq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  3212. }
  3213. // Multiplies the 4 signed or unsigned 32-bit integers from a by the 4 signed or
  3214. // unsigned 32-bit integers from b.
  3215. // https://msdn.microsoft.com/en-us/library/vstudio/bb531409(v=vs.100).aspx
  3216. FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b)
  3217. {
  3218. return vreinterpretq_m128i_s32(
  3219. vmulq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  3220. }
  3221. // Multiply the packed unsigned 16-bit integers in a and b, producing
  3222. // intermediate 32-bit integers, and store the high 16 bits of the intermediate
  3223. // integers in dst.
  3224. //
  3225. // FOR j := 0 to 3
  3226. // i := j*16
  3227. // tmp[31:0] := a[i+15:i] * b[i+15:i]
  3228. // dst[i+15:i] := tmp[31:16]
  3229. // ENDFOR
  3230. //
  3231. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmulhuw
  3232. #define _m_pmulhuw(a, b) _mm_mulhi_pu16(a, b)
  3233. // Multiplies the four single-precision, floating-point values of a and b.
  3234. //
  3235. // r0 := a0 * b0
  3236. // r1 := a1 * b1
  3237. // r2 := a2 * b2
  3238. // r3 := a3 * b3
  3239. //
  3240. // https://msdn.microsoft.com/en-us/library/vstudio/22kbk6t9(v=vs.100).aspx
  3241. FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
  3242. {
  3243. return vreinterpretq_m128_f32(
  3244. vmulq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  3245. }
  3246. // Multiply packed double-precision (64-bit) floating-point elements in a and b,
  3247. // and store the results in dst.
  3248. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_pd
  3249. FORCE_INLINE __m128d _mm_mul_pd(__m128d a, __m128d b)
  3250. {
  3251. #if defined(__aarch64__)
  3252. return vreinterpretq_m128d_f64(
  3253. vmulq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  3254. #else
  3255. double *da = (double *) &a;
  3256. double *db = (double *) &b;
  3257. double c[2];
  3258. c[0] = da[0] * db[0];
  3259. c[1] = da[1] * db[1];
  3260. return vld1q_f32((float32_t *) c);
  3261. #endif
  3262. }
  3263. // Multiply the lower double-precision (64-bit) floating-point element in a and
  3264. // b, store the result in the lower element of dst, and copy the upper element
  3265. // from a to the upper element of dst.
  3266. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mul_sd
  3267. FORCE_INLINE __m128d _mm_mul_sd(__m128d a, __m128d b)
  3268. {
  3269. return _mm_move_sd(a, _mm_mul_pd(a, b));
  3270. }
  3271. // Multiply the lower single-precision (32-bit) floating-point element in a and
  3272. // b, store the result in the lower element of dst, and copy the upper 3 packed
  3273. // elements from a to the upper elements of dst.
  3274. //
  3275. // dst[31:0] := a[31:0] * b[31:0]
  3276. // dst[127:32] := a[127:32]
  3277. //
  3278. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_ss
  3279. FORCE_INLINE __m128 _mm_mul_ss(__m128 a, __m128 b)
  3280. {
  3281. return _mm_move_ss(a, _mm_mul_ps(a, b));
  3282. }
  3283. // Multiply the low unsigned 32-bit integers from each packed 64-bit element in
  3284. // a and b, and store the unsigned 64-bit results in dst.
  3285. //
  3286. // r0 := (a0 & 0xFFFFFFFF) * (b0 & 0xFFFFFFFF)
  3287. // r1 := (a2 & 0xFFFFFFFF) * (b2 & 0xFFFFFFFF)
  3288. FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b)
  3289. {
  3290. // vmull_u32 upcasts instead of masking, so we downcast.
  3291. uint32x2_t a_lo = vmovn_u64(vreinterpretq_u64_m128i(a));
  3292. uint32x2_t b_lo = vmovn_u64(vreinterpretq_u64_m128i(b));
  3293. return vreinterpretq_m128i_u64(vmull_u32(a_lo, b_lo));
  3294. }
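// Usage sketch (illustrative only): only the even 32-bit lanes (0 and 2)
// participate, which is the usual building block for a full 32x32->64
// widening multiply:
//   __m128i even = _mm_mul_epu32(a, b);                 // a[0]*b[0], a[2]*b[2]
//   __m128i odd  = _mm_mul_epu32(_mm_srli_si128(a, 4),
//                                _mm_srli_si128(b, 4)); // a[1]*b[1], a[3]*b[3]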
  3295. // Multiply the low unsigned 32-bit integers from a and b, and store the
  3296. // unsigned 64-bit result in dst.
  3297. //
  3298. // dst[63:0] := a[31:0] * b[31:0]
  3299. //
  3300. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_su32
  3301. FORCE_INLINE __m64 _mm_mul_su32(__m64 a, __m64 b)
  3302. {
  3303. return vreinterpret_m64_u64(vget_low_u64(
  3304. vmull_u32(vreinterpret_u32_m64(a), vreinterpret_u32_m64(b))));
  3305. }
  3306. // Multiply the low signed 32-bit integers from each packed 64-bit element in
  3307. // a and b, and store the signed 64-bit results in dst.
  3308. //
  3309. // r0 := (int64_t)(int32_t)a0 * (int64_t)(int32_t)b0
  3310. // r1 := (int64_t)(int32_t)a2 * (int64_t)(int32_t)b2
  3311. FORCE_INLINE __m128i _mm_mul_epi32(__m128i a, __m128i b)
  3312. {
  3313. // vmull_s32 upcasts instead of masking, so we downcast.
  3314. int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a));
  3315. int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b));
  3316. return vreinterpretq_m128i_s64(vmull_s32(a_lo, b_lo));
  3317. }
  3318. // Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit
  3319. // integers from b.
  3320. //
  3321. // r0 := (a0 * b0) + (a1 * b1)
  3322. // r1 := (a2 * b2) + (a3 * b3)
  3323. // r2 := (a4 * b4) + (a5 * b5)
  3324. // r3 := (a6 * b6) + (a7 * b7)
  3325. // https://msdn.microsoft.com/en-us/library/yht36sa6(v=vs.90).aspx
  3326. FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b)
  3327. {
  3328. int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
  3329. vget_low_s16(vreinterpretq_s16_m128i(b)));
  3330. int32x4_t high = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
  3331. vget_high_s16(vreinterpretq_s16_m128i(b)));
  3332. int32x2_t low_sum = vpadd_s32(vget_low_s32(low), vget_high_s32(low));
  3333. int32x2_t high_sum = vpadd_s32(vget_low_s32(high), vget_high_s32(high));
  3334. return vreinterpretq_m128i_s32(vcombine_s32(low_sum, high_sum));
  3335. }
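// Illustrative sketch (not part of sse2neon): a scalar model of
// _mm_madd_epi16. Each 32-bit result lane is the sum of two adjacent 16-bit
// products, which is why the NEON version widens with vmull_s16 and then
// pair-adds with vpadd_s32. The helper name is hypothetical.
FORCE_INLINE void sse2neon_example_madd_epi16_model(const int16_t a[8],
                                                    const int16_t b[8],
                                                    int32_t dst[4])
{
    for (int j = 0; j < 4; j++)
        dst[j] = (int32_t) a[2 * j] * b[2 * j] +
                 (int32_t) a[2 * j + 1] * b[2 * j + 1];
}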
  3336. // Multiply packed signed 16-bit integers in a and b, producing intermediate
  3337. // signed 32-bit integers. Shift right by 15 bits while rounding up, and store
  3338. // the packed 16-bit integers in dst.
  3339. //
  3340. // r0 := Round(((int32_t)a0 * (int32_t)b0) >> 15)
  3341. // r1 := Round(((int32_t)a1 * (int32_t)b1) >> 15)
  3342. // r2 := Round(((int32_t)a2 * (int32_t)b2) >> 15)
  3343. // ...
  3344. // r7 := Round(((int32_t)a7 * (int32_t)b7) >> 15)
  3345. FORCE_INLINE __m128i _mm_mulhrs_epi16(__m128i a, __m128i b)
  3346. {
  3347. // Has issues due to saturation
  3348. // return vreinterpretq_m128i_s16(vqrdmulhq_s16(a, b));
  3349. // Multiply
  3350. int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
  3351. vget_low_s16(vreinterpretq_s16_m128i(b)));
  3352. int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
  3353. vget_high_s16(vreinterpretq_s16_m128i(b)));
  3354. // Rounding narrowing shift right
  3355. // narrow = (int16_t)((mul + 16384) >> 15);
  3356. int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15);
  3357. int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15);
  3358. // Join together
  3359. return vreinterpretq_m128i_s16(vcombine_s16(narrow_lo, narrow_hi));
  3360. }
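// Illustrative sketch (not part of sse2neon): the per-lane scalar formula that
// _mm_mulhrs_epi16 implements, matching the "(mul + 16384) >> 15" note above.
// The helper name is hypothetical.
FORCE_INLINE int16_t sse2neon_example_mulhrs_lane(int16_t a, int16_t b)
{
    int32_t mul = (int32_t) a * (int32_t) b;
    return (int16_t) ((mul + 0x4000) >> 15);
}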
  3361. // Vertically multiply each unsigned 8-bit integer from a with the corresponding
  3362. // signed 8-bit integer from b, producing intermediate signed 16-bit integers.
  3363. // Horizontally add adjacent pairs of intermediate signed 16-bit integers,
  3364. // and pack the saturated results in dst.
  3365. //
  3366. // FOR j := 0 to 7
  3367. // i := j*16
  3368. // dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] +
  3369. // a[i+7:i]*b[i+7:i] )
  3370. // ENDFOR
  3371. FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b)
  3372. {
  3373. #if defined(__aarch64__)
  3374. uint8x16_t a = vreinterpretq_u8_m128i(_a);
  3375. int8x16_t b = vreinterpretq_s8_m128i(_b);
  3376. int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a))),
  3377. vmovl_s8(vget_low_s8(b)));
  3378. int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(a))),
  3379. vmovl_s8(vget_high_s8(b)));
  3380. return vreinterpretq_m128i_s16(
  3381. vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th)));
  3382. #else
  3383. // This would be much simpler if x86 would choose to zero extend OR sign
  3384. // extend, not both. This could probably be optimized better.
  3385. uint16x8_t a = vreinterpretq_u16_m128i(_a);
  3386. int16x8_t b = vreinterpretq_s16_m128i(_b);
  3387. // Zero extend a
  3388. int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a, 8));
  3389. int16x8_t a_even = vreinterpretq_s16_u16(vbicq_u16(a, vdupq_n_u16(0xff00)));
  3390. // Sign extend by shifting left then shifting right.
  3391. int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b, 8), 8);
  3392. int16x8_t b_odd = vshrq_n_s16(b, 8);
  3393. // multiply
  3394. int16x8_t prod1 = vmulq_s16(a_even, b_even);
  3395. int16x8_t prod2 = vmulq_s16(a_odd, b_odd);
  3396. // saturated add
  3397. return vreinterpretq_m128i_s16(vqaddq_s16(prod1, prod2));
  3398. #endif
  3399. }
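// Illustrative sketch (not part of sse2neon): a scalar model of
// _mm_maddubs_epi16 showing the mixed zero-extend/sign-extend behaviour the
// comment above refers to. The helper names are hypothetical.
FORCE_INLINE int16_t sse2neon_example_saturate_i32_to_i16(int32_t v)
{
    return (int16_t) (v > 32767 ? 32767 : (v < -32768 ? -32768 : v));
}
FORCE_INLINE void sse2neon_example_maddubs_model(const uint8_t a[16],
                                                 const int8_t b[16],
                                                 int16_t dst[8])
{
    for (int j = 0; j < 8; j++) {
        // a is zero-extended (unsigned), b is sign-extended (signed)
        int32_t sum = (int32_t) a[2 * j] * b[2 * j] +
                      (int32_t) a[2 * j + 1] * b[2 * j + 1];
        dst[j] = sse2neon_example_saturate_i32_to_i16(sum);
    }
}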
// Computes the fused multiply-add of packed single-precision (32-bit)
// floating-point values: multiplies a and b, then adds c to the intermediate
// result and returns it.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd
  3405. FORCE_INLINE __m128 _mm_fmadd_ps(__m128 a, __m128 b, __m128 c)
  3406. {
  3407. #if defined(__aarch64__)
  3408. return vreinterpretq_m128_f32(vfmaq_f32(vreinterpretq_f32_m128(c),
  3409. vreinterpretq_f32_m128(b),
  3410. vreinterpretq_f32_m128(a)));
  3411. #else
  3412. return _mm_add_ps(_mm_mul_ps(a, b), c);
  3413. #endif
  3414. }
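// Illustrative sketch (not part of sse2neon): one Horner step of a polynomial
// evaluation, a typical use of _mm_fmadd_ps (acc = acc * x + coeff). The
// helper name is hypothetical.
FORCE_INLINE __m128 sse2neon_example_horner_step(__m128 acc,
                                                 __m128 x,
                                                 __m128 coeff)
{
    return _mm_fmadd_ps(acc, x, coeff);
}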
  3415. // Alternatively add and subtract packed single-precision (32-bit)
  3416. // floating-point elements in a to/from packed elements in b, and store the
  3417. // results in dst.
  3418. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=addsub_ps
  3419. FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b)
  3420. {
  3421. __m128 mask = {-1.0f, 1.0f, -1.0f, 1.0f};
  3422. return _mm_fmadd_ps(b, mask, a);
  3423. }
  3424. // Horizontally add adjacent pairs of double-precision (64-bit) floating-point
  3425. // elements in a and b, and pack the results in dst.
  3426. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pd
  3427. FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b)
  3428. {
  3429. #if defined(__aarch64__)
  3430. return vreinterpretq_m128d_f64(
  3431. vpaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  3432. #else
  3433. double *da = (double *) &a;
  3434. double *db = (double *) &b;
  3435. double c[] = {da[0] + da[1], db[0] + db[1]};
  3436. return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
  3437. #endif
  3438. }
  3439. // Compute the absolute differences of packed unsigned 8-bit integers in a and
  3440. // b, then horizontally sum each consecutive 8 differences to produce two
  3441. // unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
  3442. // 16 bits of 64-bit elements in dst.
  3443. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sad_epu8
  3444. FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b)
  3445. {
  3446. uint16x8_t t = vpaddlq_u8(vabdq_u8((uint8x16_t) a, (uint8x16_t) b));
  3447. uint16_t r0 = t[0] + t[1] + t[2] + t[3];
  3448. uint16_t r4 = t[4] + t[5] + t[6] + t[7];
  3449. uint16x8_t r = vsetq_lane_u16(r0, vdupq_n_u16(0), 0);
  3450. return (__m128i) vsetq_lane_u16(r4, r, 4);
  3451. }
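// Illustrative sketch (not part of sse2neon): a scalar model of _mm_sad_epu8.
// Each 64-bit result lane holds the sum of eight absolute byte differences in
// its low 16 bits. The helper name is hypothetical.
FORCE_INLINE void sse2neon_example_sad_epu8_model(const uint8_t a[16],
                                                  const uint8_t b[16],
                                                  uint64_t dst[2])
{
    dst[0] = 0;
    dst[1] = 0;
    for (int i = 0; i < 16; i++) {
        int diff = (int) a[i] - (int) b[i];
        dst[i / 8] += (uint64_t) (diff < 0 ? -diff : diff);
    }
}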
  3452. // Compute the absolute differences of packed unsigned 8-bit integers in a and
  3453. // b, then horizontally sum each consecutive 8 differences to produce four
  3454. // unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
  3455. // 16 bits of dst.
  3456. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sad_pu8
  3457. FORCE_INLINE __m64 _mm_sad_pu8(__m64 a, __m64 b)
  3458. {
  3459. uint16x4_t t =
  3460. vpaddl_u8(vabd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
  3461. uint16_t r0 = t[0] + t[1] + t[2] + t[3];
  3462. return vreinterpret_m64_u16(vset_lane_u16(r0, vdup_n_u16(0), 0));
  3463. }
  3464. // Compute the absolute differences of packed unsigned 8-bit integers in a and
  3465. // b, then horizontally sum each consecutive 8 differences to produce four
  3466. // unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
  3467. // 16 bits of dst.
  3468. //
  3469. // FOR j := 0 to 7
  3470. // i := j*8
  3471. // tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
  3472. // ENDFOR
// dst[15:0] := tmp[7:0] + tmp[15:8] + tmp[23:16] + tmp[31:24] + tmp[39:32] +
//              tmp[47:40] + tmp[55:48] + tmp[63:56]
// dst[63:16] := 0
  3475. //
  3476. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_psadbw
  3477. #define _m_psadbw(a, b) _mm_sad_pu8(a, b)
  3478. // Divides the four single-precision, floating-point values of a and b.
  3479. //
  3480. // r0 := a0 / b0
  3481. // r1 := a1 / b1
  3482. // r2 := a2 / b2
  3483. // r3 := a3 / b3
  3484. //
  3485. // https://msdn.microsoft.com/en-us/library/edaw8147(v=vs.100).aspx
  3486. FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b)
  3487. {
  3488. #if defined(__aarch64__) && !SSE2NEON_PRECISE_DIV
  3489. return vreinterpretq_m128_f32(
  3490. vdivq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  3491. #else
  3492. float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(b));
  3493. recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
  3494. #if SSE2NEON_PRECISE_DIV
// Additional Newton-Raphson iteration for accuracy
  3496. recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
  3497. #endif
  3498. return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), recip));
  3499. #endif
  3500. }
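// Illustrative sketch (not part of sse2neon): the Newton-Raphson refinement
// used above, written out for one scalar lane. vrecpsq_f32 computes
// (2 - est * b), so each "recip = recip * vrecps(recip, b)" step roughly
// doubles the number of correct bits of the reciprocal estimate. The helper
// name is hypothetical.
FORCE_INLINE float sse2neon_example_recip_nr_step(float est, float b)
{
    return est * (2.0f - est * b);
}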
  3501. // Divides the scalar single-precision floating point value of a by b.
  3502. // https://msdn.microsoft.com/en-us/library/4y73xa49(v=vs.100).aspx
  3503. FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b)
  3504. {
  3505. float32_t value =
  3506. vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0);
  3507. return vreinterpretq_m128_f32(
  3508. vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
  3509. }
  3510. // Divide packed double-precision (64-bit) floating-point elements in a by
  3511. // packed elements in b, and store the results in dst.
  3512. //
  3513. // FOR j := 0 to 1
  3514. // i := 64*j
  3515. // dst[i+63:i] := a[i+63:i] / b[i+63:i]
  3516. // ENDFOR
  3517. //
  3518. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_pd
  3519. FORCE_INLINE __m128d _mm_div_pd(__m128d a, __m128d b)
  3520. {
  3521. #if defined(__aarch64__)
  3522. return vreinterpretq_m128d_f64(
  3523. vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  3524. #else
  3525. double *da = (double *) &a;
  3526. double *db = (double *) &b;
  3527. double c[2];
  3528. c[0] = da[0] / db[0];
  3529. c[1] = da[1] / db[1];
  3530. return vld1q_f32((float32_t *) c);
  3531. #endif
  3532. }
  3533. // Divide the lower double-precision (64-bit) floating-point element in a by the
  3534. // lower double-precision (64-bit) floating-point element in b, store the result
  3535. // in the lower element of dst, and copy the upper element from a to the upper
  3536. // element of dst.
  3537. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_sd
  3538. FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b)
  3539. {
  3540. #if defined(__aarch64__)
  3541. float64x2_t tmp =
  3542. vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b));
  3543. return vreinterpretq_m128d_f64(
  3544. vsetq_lane_f64(vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1), tmp, 1));
  3545. #else
  3546. return _mm_move_sd(a, _mm_div_pd(a, b));
  3547. #endif
  3548. }
  3549. // Compute the approximate reciprocal of packed single-precision (32-bit)
  3550. // floating-point elements in a, and store the results in dst. The maximum
  3551. // relative error for this approximation is less than 1.5*2^-12.
  3552. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ps
  3553. FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
  3554. {
  3555. float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
  3556. recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
  3557. #if SSE2NEON_PRECISE_DIV
// Additional Newton-Raphson iteration for accuracy
  3559. recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
  3560. #endif
  3561. return vreinterpretq_m128_f32(recip);
  3562. }
  3563. // Compute the approximate reciprocal of the lower single-precision (32-bit)
  3564. // floating-point element in a, store the result in the lower element of dst,
  3565. // and copy the upper 3 packed elements from a to the upper elements of dst. The
  3566. // maximum relative error for this approximation is less than 1.5*2^-12.
  3567. //
  3568. // dst[31:0] := (1.0 / a[31:0])
  3569. // dst[127:32] := a[127:32]
  3570. //
  3571. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ss
  3572. FORCE_INLINE __m128 _mm_rcp_ss(__m128 a)
  3573. {
  3574. return _mm_move_ss(a, _mm_rcp_ps(a));
  3575. }
// Computes the approximate square roots of the four single-precision,
// floating-point values of a. The fallback path first computes reciprocal
// square roots and then takes their reciprocals.
  3579. //
  3580. // r0 := sqrt(a0)
  3581. // r1 := sqrt(a1)
  3582. // r2 := sqrt(a2)
  3583. // r3 := sqrt(a3)
  3584. //
  3585. // https://msdn.microsoft.com/en-us/library/vstudio/8z67bwwk(v=vs.100).aspx
  3586. FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
  3587. {
  3588. #if SSE2NEON_PRECISE_SQRT
  3589. float32x4_t recip = vrsqrteq_f32(vreinterpretq_f32_m128(in));
  3590. // Test for vrsqrteq_f32(0) -> positive infinity case.
  3591. // Change to zero, so that s * 1/sqrt(s) result is zero too.
  3592. const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
  3593. const uint32x4_t div_by_zero =
  3594. vceqq_u32(pos_inf, vreinterpretq_u32_f32(recip));
  3595. recip = vreinterpretq_f32_u32(
  3596. vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(recip)));
// Additional Newton-Raphson iteration for accuracy
  3598. recip = vmulq_f32(
  3599. vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
  3600. recip);
  3601. recip = vmulq_f32(
  3602. vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
  3603. recip);
  3604. // sqrt(s) = s * 1/sqrt(s)
  3605. return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(in), recip));
  3606. #elif defined(__aarch64__)
  3607. return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in)));
  3608. #else
  3609. float32x4_t recipsq = vrsqrteq_f32(vreinterpretq_f32_m128(in));
  3610. float32x4_t sq = vrecpeq_f32(recipsq);
  3611. return vreinterpretq_m128_f32(sq);
  3612. #endif
  3613. }
  3614. // Computes the approximation of the square root of the scalar single-precision
  3615. // floating point value of in.
  3616. // https://msdn.microsoft.com/en-us/library/ahfsc22d(v=vs.100).aspx
  3617. FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
  3618. {
  3619. float32_t value =
  3620. vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0);
  3621. return vreinterpretq_m128_f32(
  3622. vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0));
  3623. }
  3624. // Computes the approximations of the reciprocal square roots of the four
  3625. // single-precision floating point values of in.
  3626. // https://msdn.microsoft.com/en-us/library/22hfsh53(v=vs.100).aspx
  3627. FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
  3628. {
  3629. float32x4_t out = vrsqrteq_f32(vreinterpretq_f32_m128(in));
  3630. #if SSE2NEON_PRECISE_RSQRT
// Additional Newton-Raphson iteration for accuracy
  3632. out = vmulq_f32(
  3633. out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
  3634. out = vmulq_f32(
  3635. out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
  3636. #endif
  3637. return vreinterpretq_m128_f32(out);
  3638. }
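// Illustrative sketch (not part of sse2neon): one scalar Newton-Raphson step
// for the reciprocal square root, mirroring the vrsqrtsq_f32 refinement above.
// vrsqrtsq_f32(x, y) computes (3 - x * y) / 2, so the update below is
// est = est * (3 - in * est * est) / 2. The helper name is hypothetical.
FORCE_INLINE float sse2neon_example_rsqrt_nr_step(float est, float in)
{
    return est * (3.0f - in * est * est) * 0.5f;
}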
  3639. // Compute the approximate reciprocal square root of the lower single-precision
  3640. // (32-bit) floating-point element in a, store the result in the lower element
  3641. // of dst, and copy the upper 3 packed elements from a to the upper elements of
  3642. // dst.
  3643. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ss
  3644. FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in)
  3645. {
  3646. return vsetq_lane_f32(vgetq_lane_f32(_mm_rsqrt_ps(in), 0), in, 0);
  3647. }
  3648. // Compare packed signed 16-bit integers in a and b, and store packed maximum
  3649. // values in dst.
  3650. //
  3651. // FOR j := 0 to 3
  3652. // i := j*16
  3653. // dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
  3654. // ENDFOR
  3655. //
  3656. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pi16
  3657. FORCE_INLINE __m64 _mm_max_pi16(__m64 a, __m64 b)
  3658. {
  3659. return vreinterpret_m64_s16(
  3660. vmax_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
  3661. }
  3662. // Compare packed signed 16-bit integers in a and b, and store packed maximum
  3663. // values in dst.
  3664. //
  3665. // FOR j := 0 to 3
  3666. // i := j*16
  3667. // dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
  3668. // ENDFOR
  3669. //
  3670. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pi16
  3671. #define _m_pmaxsw(a, b) _mm_max_pi16(a, b)
  3672. // Computes the maximums of the four single-precision, floating-point values of
  3673. // a and b.
  3674. // https://msdn.microsoft.com/en-us/library/vstudio/ff5d607a(v=vs.100).aspx
  3675. FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b)
  3676. {
  3677. #if SSE2NEON_PRECISE_MINMAX
  3678. float32x4_t _a = vreinterpretq_f32_m128(a);
  3679. float32x4_t _b = vreinterpretq_f32_m128(b);
  3680. return vbslq_f32(vcltq_f32(_b, _a), _a, _b);
  3681. #else
  3682. return vreinterpretq_m128_f32(
  3683. vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  3684. #endif
  3685. }
  3686. // Compare packed unsigned 8-bit integers in a and b, and store packed maximum
  3687. // values in dst.
  3688. //
  3689. // FOR j := 0 to 7
  3690. // i := j*8
  3691. // dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
  3692. // ENDFOR
  3693. //
  3694. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pu8
  3695. FORCE_INLINE __m64 _mm_max_pu8(__m64 a, __m64 b)
  3696. {
  3697. return vreinterpret_m64_u8(
  3698. vmax_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
  3699. }
  3700. // Compare packed unsigned 8-bit integers in a and b, and store packed maximum
  3701. // values in dst.
  3702. //
  3703. // FOR j := 0 to 7
  3704. // i := j*8
  3705. // dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
  3706. // ENDFOR
  3707. //
  3708. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pu8
  3709. #define _m_pmaxub(a, b) _mm_max_pu8(a, b)
  3710. // Compare packed signed 16-bit integers in a and b, and store packed minimum
  3711. // values in dst.
  3712. //
  3713. // FOR j := 0 to 3
  3714. // i := j*16
  3715. // dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
  3716. // ENDFOR
  3717. //
  3718. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pi16
  3719. FORCE_INLINE __m64 _mm_min_pi16(__m64 a, __m64 b)
  3720. {
  3721. return vreinterpret_m64_s16(
  3722. vmin_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
  3723. }
  3724. // Compare packed signed 16-bit integers in a and b, and store packed minimum
  3725. // values in dst.
  3726. //
  3727. // FOR j := 0 to 3
  3728. // i := j*16
  3729. // dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
  3730. // ENDFOR
  3731. //
  3732. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pi16
  3733. #define _m_pminsw(a, b) _mm_min_pi16(a, b)
  3734. // Computes the minima of the four single-precision, floating-point values of a
  3735. // and b.
  3736. // https://msdn.microsoft.com/en-us/library/vstudio/wh13kadz(v=vs.100).aspx
  3737. FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b)
  3738. {
  3739. #if SSE2NEON_PRECISE_MINMAX
  3740. float32x4_t _a = vreinterpretq_f32_m128(a);
  3741. float32x4_t _b = vreinterpretq_f32_m128(b);
  3742. return vbslq_f32(vcltq_f32(_a, _b), _a, _b);
  3743. #else
  3744. return vreinterpretq_m128_f32(
  3745. vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  3746. #endif
  3747. }
  3748. // Compare packed unsigned 8-bit integers in a and b, and store packed minimum
  3749. // values in dst.
  3750. //
  3751. // FOR j := 0 to 7
  3752. // i := j*8
  3753. // dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
  3754. // ENDFOR
  3755. //
  3756. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pu8
  3757. FORCE_INLINE __m64 _mm_min_pu8(__m64 a, __m64 b)
  3758. {
  3759. return vreinterpret_m64_u8(
  3760. vmin_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
  3761. }
  3762. // Compare packed unsigned 8-bit integers in a and b, and store packed minimum
  3763. // values in dst.
  3764. //
  3765. // FOR j := 0 to 7
  3766. // i := j*8
  3767. // dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
  3768. // ENDFOR
  3769. //
  3770. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pu8
  3771. #define _m_pminub(a, b) _mm_min_pu8(a, b)
  3772. // Computes the maximum of the two lower scalar single-precision floating point
  3773. // values of a and b.
  3774. // https://msdn.microsoft.com/en-us/library/s6db5esz(v=vs.100).aspx
  3775. FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b)
  3776. {
  3777. float32_t value = vgetq_lane_f32(_mm_max_ps(a, b), 0);
  3778. return vreinterpretq_m128_f32(
  3779. vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
  3780. }
  3781. // Computes the minimum of the two lower scalar single-precision floating point
  3782. // values of a and b.
  3783. // https://msdn.microsoft.com/en-us/library/0a9y7xaa(v=vs.100).aspx
  3784. FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b)
  3785. {
  3786. float32_t value = vgetq_lane_f32(_mm_min_ps(a, b), 0);
  3787. return vreinterpretq_m128_f32(
  3788. vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
  3789. }
  3790. // Computes the pairwise maxima of the 16 unsigned 8-bit integers from a and the
  3791. // 16 unsigned 8-bit integers from b.
  3792. // https://msdn.microsoft.com/en-us/library/st6634za(v=vs.100).aspx
  3793. FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b)
  3794. {
  3795. return vreinterpretq_m128i_u8(
  3796. vmaxq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  3797. }
  3798. // Computes the pairwise minima of the 16 unsigned 8-bit integers from a and the
  3799. // 16 unsigned 8-bit integers from b.
// https://msdn.microsoft.com/ko-kr/library/17k8cf58(v=vs.100).aspx
  3801. FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b)
  3802. {
  3803. return vreinterpretq_m128i_u8(
  3804. vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  3805. }
  3806. // Computes the pairwise minima of the 8 signed 16-bit integers from a and the 8
  3807. // signed 16-bit integers from b.
  3808. // https://msdn.microsoft.com/en-us/library/vstudio/6te997ew(v=vs.100).aspx
  3809. FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b)
  3810. {
  3811. return vreinterpretq_m128i_s16(
  3812. vminq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  3813. }
  3814. // Compare packed signed 8-bit integers in a and b, and store packed maximum
  3815. // values in dst.
  3816. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epi8
  3817. FORCE_INLINE __m128i _mm_max_epi8(__m128i a, __m128i b)
  3818. {
  3819. return vreinterpretq_m128i_s8(
  3820. vmaxq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  3821. }
  3822. // Compare packed unsigned 16-bit integers in a and b, and store packed maximum
  3823. // values in dst.
  3824. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu16
  3825. FORCE_INLINE __m128i _mm_max_epu16(__m128i a, __m128i b)
  3826. {
  3827. return vreinterpretq_m128i_u16(
  3828. vmaxq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
  3829. }
  3830. // Compare packed signed 8-bit integers in a and b, and store packed minimum
  3831. // values in dst.
  3832. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epi8
  3833. FORCE_INLINE __m128i _mm_min_epi8(__m128i a, __m128i b)
  3834. {
  3835. return vreinterpretq_m128i_s8(
  3836. vminq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  3837. }
  3838. // Compare packed unsigned 16-bit integers in a and b, and store packed minimum
  3839. // values in dst.
  3840. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu16
  3841. FORCE_INLINE __m128i _mm_min_epu16(__m128i a, __m128i b)
  3842. {
  3843. return vreinterpretq_m128i_u16(
  3844. vminq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
  3845. }
  3846. // Computes the pairwise maxima of the 8 signed 16-bit integers from a and the 8
  3847. // signed 16-bit integers from b.
  3848. // https://msdn.microsoft.com/en-us/LIBRary/3x060h7c(v=vs.100).aspx
  3849. FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b)
  3850. {
  3851. return vreinterpretq_m128i_s16(
  3852. vmaxq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  3853. }
// epi versions of min/max
// Computes the pairwise maxima of the four signed 32-bit integer values of a
// and b.
  3857. //
  3858. // A 128-bit parameter that can be defined with the following equations:
  3859. // r0 := (a0 > b0) ? a0 : b0
  3860. // r1 := (a1 > b1) ? a1 : b1
  3861. // r2 := (a2 > b2) ? a2 : b2
  3862. // r3 := (a3 > b3) ? a3 : b3
  3863. //
  3864. // https://msdn.microsoft.com/en-us/library/vstudio/bb514055(v=vs.100).aspx
  3865. FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b)
  3866. {
  3867. return vreinterpretq_m128i_s32(
  3868. vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  3869. }
// Computes the pairwise minima of the four signed 32-bit integer values of a
// and b.
  3872. //
  3873. // A 128-bit parameter that can be defined with the following equations:
  3874. // r0 := (a0 < b0) ? a0 : b0
  3875. // r1 := (a1 < b1) ? a1 : b1
  3876. // r2 := (a2 < b2) ? a2 : b2
  3877. // r3 := (a3 < b3) ? a3 : b3
  3878. //
  3879. // https://msdn.microsoft.com/en-us/library/vstudio/bb531476(v=vs.100).aspx
  3880. FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b)
  3881. {
  3882. return vreinterpretq_m128i_s32(
  3883. vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  3884. }
  3885. // Compare packed unsigned 32-bit integers in a and b, and store packed maximum
  3886. // values in dst.
  3887. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu32
  3888. FORCE_INLINE __m128i _mm_max_epu32(__m128i a, __m128i b)
  3889. {
  3890. return vreinterpretq_m128i_u32(
  3891. vmaxq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
  3892. }
  3893. // Compare packed unsigned 32-bit integers in a and b, and store packed minimum
  3894. // values in dst.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu32
  3896. FORCE_INLINE __m128i _mm_min_epu32(__m128i a, __m128i b)
  3897. {
  3898. return vreinterpretq_m128i_u32(
  3899. vminq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
  3900. }
  3901. // Multiply the packed unsigned 16-bit integers in a and b, producing
  3902. // intermediate 32-bit integers, and store the high 16 bits of the intermediate
  3903. // integers in dst.
  3904. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhi_pu16
  3905. FORCE_INLINE __m64 _mm_mulhi_pu16(__m64 a, __m64 b)
  3906. {
  3907. return vreinterpret_m64_u16(vshrn_n_u32(
  3908. vmull_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)), 16));
  3909. }
  3910. // Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit
  3911. // integers from b.
  3912. //
  3913. // r0 := (a0 * b0)[31:16]
  3914. // r1 := (a1 * b1)[31:16]
  3915. // ...
  3916. // r7 := (a7 * b7)[31:16]
  3917. //
  3918. // https://msdn.microsoft.com/en-us/library/vstudio/59hddw1d(v=vs.100).aspx
  3919. FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
  3920. {
  3921. /* FIXME: issue with large values because of result saturation */
  3922. // int16x8_t ret = vqdmulhq_s16(vreinterpretq_s16_m128i(a),
  3923. // vreinterpretq_s16_m128i(b)); /* =2*a*b */ return
  3924. // vreinterpretq_m128i_s16(vshrq_n_s16(ret, 1));
  3925. int16x4_t a3210 = vget_low_s16(vreinterpretq_s16_m128i(a));
  3926. int16x4_t b3210 = vget_low_s16(vreinterpretq_s16_m128i(b));
  3927. int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
  3928. int16x4_t a7654 = vget_high_s16(vreinterpretq_s16_m128i(a));
  3929. int16x4_t b7654 = vget_high_s16(vreinterpretq_s16_m128i(b));
  3930. int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
  3931. uint16x8x2_t r =
  3932. vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654));
  3933. return vreinterpretq_m128i_u16(r.val[1]);
  3934. }
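// Illustrative sketch (not part of sse2neon): the per-lane scalar operation of
// _mm_mulhi_epi16, i.e. the upper 16 bits of the full 32-bit signed product.
// This is what the widening multiply plus vuzpq_u16 above extracts. The helper
// name is hypothetical.
FORCE_INLINE int16_t sse2neon_example_mulhi_lane(int16_t a, int16_t b)
{
    return (int16_t) (((int32_t) a * (int32_t) b) >> 16);
}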
  3935. // Multiply the packed unsigned 16-bit integers in a and b, producing
  3936. // intermediate 32-bit integers, and store the high 16 bits of the intermediate
  3937. // integers in dst.
  3938. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhi_epu16
  3939. FORCE_INLINE __m128i _mm_mulhi_epu16(__m128i a, __m128i b)
  3940. {
  3941. uint16x4_t a3210 = vget_low_u16(vreinterpretq_u16_m128i(a));
  3942. uint16x4_t b3210 = vget_low_u16(vreinterpretq_u16_m128i(b));
  3943. uint32x4_t ab3210 = vmull_u16(a3210, b3210);
  3944. #if defined(__aarch64__)
  3945. uint32x4_t ab7654 =
  3946. vmull_high_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
  3947. uint16x8_t r = vuzp2q_u16(vreinterpretq_u16_u32(ab3210),
  3948. vreinterpretq_u16_u32(ab7654));
  3949. return vreinterpretq_m128i_u16(r);
  3950. #else
  3951. uint16x4_t a7654 = vget_high_u16(vreinterpretq_u16_m128i(a));
  3952. uint16x4_t b7654 = vget_high_u16(vreinterpretq_u16_m128i(b));
  3953. uint32x4_t ab7654 = vmull_u16(a7654, b7654);
  3954. uint16x8x2_t r =
  3955. vuzpq_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654));
  3956. return vreinterpretq_m128i_u16(r.val[1]);
  3957. #endif
  3958. }
// Computes the pairwise add of the single-precision, floating-point values of
// a and b.
// https://msdn.microsoft.com/en-us/library/yd9wecaa.aspx
  3962. FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b)
  3963. {
  3964. #if defined(__aarch64__)
  3965. return vreinterpretq_m128_f32(
  3966. vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  3967. #else
  3968. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
  3969. float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
  3970. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
  3971. float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
  3972. return vreinterpretq_m128_f32(
  3973. vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)));
  3974. #endif
  3975. }
// Computes the pairwise add of the 16-bit signed or unsigned integer values of
// a and b.
  3978. FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b)
  3979. {
  3980. int16x8_t a = vreinterpretq_s16_m128i(_a);
  3981. int16x8_t b = vreinterpretq_s16_m128i(_b);
  3982. #if defined(__aarch64__)
  3983. return vreinterpretq_m128i_s16(vpaddq_s16(a, b));
  3984. #else
  3985. return vreinterpretq_m128i_s16(
  3986. vcombine_s16(vpadd_s16(vget_low_s16(a), vget_high_s16(a)),
  3987. vpadd_s16(vget_low_s16(b), vget_high_s16(b))));
  3988. #endif
  3989. }
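// Illustrative sketch (not part of sse2neon): a scalar model of
// _mm_hadd_epi16. The first four output lanes are pair sums from a, the last
// four are pair sums from b, matching the vpadd/vpaddq layout above. The
// helper name is hypothetical.
FORCE_INLINE void sse2neon_example_hadd_epi16_model(const int16_t a[8],
                                                    const int16_t b[8],
                                                    int16_t dst[8])
{
    for (int j = 0; j < 4; j++) {
        dst[j] = (int16_t) (a[2 * j] + a[2 * j + 1]);
        dst[j + 4] = (int16_t) (b[2 * j] + b[2 * j + 1]);
    }
}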
// Horizontally subtract adjacent pairs of single-precision (32-bit)
// floating-point elements in a and b, and pack the results in dst.
  3992. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_ps
  3993. FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b)
  3994. {
  3995. #if defined(__aarch64__)
  3996. return vreinterpretq_m128_f32(vsubq_f32(
  3997. vuzp1q_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b)),
  3998. vuzp2q_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b))));
  3999. #else
  4000. float32x4x2_t c =
  4001. vuzpq_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b));
  4002. return vreinterpretq_m128_f32(vsubq_f32(c.val[0], c.val[1]));
  4003. #endif
  4004. }
  4005. // Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the
  4006. // signed 16-bit results in dst.
  4007. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pi16
  4008. FORCE_INLINE __m64 _mm_hadd_pi16(__m64 a, __m64 b)
  4009. {
  4010. return vreinterpret_m64_s16(
  4011. vpadd_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
  4012. }
  4013. // Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the
  4014. // signed 32-bit results in dst.
  4015. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pi32
  4016. FORCE_INLINE __m64 _mm_hadd_pi32(__m64 a, __m64 b)
  4017. {
  4018. return vreinterpret_m64_s32(
  4019. vpadd_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b)));
  4020. }
// Computes the pairwise difference of the 16-bit signed or unsigned integer
// values of a and b.
  4023. FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b)
  4024. {
  4025. int32x4_t a = vreinterpretq_s32_m128i(_a);
  4026. int32x4_t b = vreinterpretq_s32_m128i(_b);
  4027. // Interleave using vshrn/vmovn
  4028. // [a0|a2|a4|a6|b0|b2|b4|b6]
  4029. // [a1|a3|a5|a7|b1|b3|b5|b7]
  4030. int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
  4031. int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
  4032. // Subtract
  4033. return vreinterpretq_m128i_s16(vsubq_s16(ab0246, ab1357));
  4034. }
// Computes the saturated pairwise add of the 16-bit signed integer values of
// a and b.
  4037. FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b)
  4038. {
  4039. #if defined(__aarch64__)
  4040. int16x8_t a = vreinterpretq_s16_m128i(_a);
  4041. int16x8_t b = vreinterpretq_s16_m128i(_b);
  4042. return vreinterpretq_s64_s16(
  4043. vqaddq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
  4044. #else
  4045. int32x4_t a = vreinterpretq_s32_m128i(_a);
  4046. int32x4_t b = vreinterpretq_s32_m128i(_b);
  4047. // Interleave using vshrn/vmovn
  4048. // [a0|a2|a4|a6|b0|b2|b4|b6]
  4049. // [a1|a3|a5|a7|b1|b3|b5|b7]
  4050. int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
  4051. int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
  4052. // Saturated add
  4053. return vreinterpretq_m128i_s16(vqaddq_s16(ab0246, ab1357));
  4054. #endif
  4055. }
// Computes the saturated pairwise difference of the 16-bit signed integer
// values of a and b.
  4058. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsubs_epi16
  4059. FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b)
  4060. {
  4061. #if defined(__aarch64__)
  4062. int16x8_t a = vreinterpretq_s16_m128i(_a);
  4063. int16x8_t b = vreinterpretq_s16_m128i(_b);
  4064. return vreinterpretq_s64_s16(
  4065. vqsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
  4066. #else
  4067. int32x4_t a = vreinterpretq_s32_m128i(_a);
  4068. int32x4_t b = vreinterpretq_s32_m128i(_b);
  4069. // Interleave using vshrn/vmovn
  4070. // [a0|a2|a4|a6|b0|b2|b4|b6]
  4071. // [a1|a3|a5|a7|b1|b3|b5|b7]
  4072. int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
  4073. int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
  4074. // Saturated subtract
  4075. return vreinterpretq_m128i_s16(vqsubq_s16(ab0246, ab1357));
  4076. #endif
  4077. }
// Computes the pairwise add of the 32-bit signed or unsigned integer values of
// a and b.
  4080. FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b)
  4081. {
  4082. int32x4_t a = vreinterpretq_s32_m128i(_a);
  4083. int32x4_t b = vreinterpretq_s32_m128i(_b);
  4084. return vreinterpretq_m128i_s32(
  4085. vcombine_s32(vpadd_s32(vget_low_s32(a), vget_high_s32(a)),
  4086. vpadd_s32(vget_low_s32(b), vget_high_s32(b))));
  4087. }
// Computes the pairwise difference of the 32-bit signed or unsigned integer
// values of a and b.
  4090. FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b)
  4091. {
  4092. int64x2_t a = vreinterpretq_s64_m128i(_a);
  4093. int64x2_t b = vreinterpretq_s64_m128i(_b);
  4094. // Interleave using vshrn/vmovn
  4095. // [a0|a2|b0|b2]
// [a1|a3|b1|b3]
  4097. int32x4_t ab02 = vcombine_s32(vmovn_s64(a), vmovn_s64(b));
  4098. int32x4_t ab13 = vcombine_s32(vshrn_n_s64(a, 32), vshrn_n_s64(b, 32));
  4099. // Subtract
  4100. return vreinterpretq_m128i_s32(vsubq_s32(ab02, ab13));
  4101. }
  4102. // Kahan summation for accurate summation of floating-point numbers.
  4103. // http://blog.zachbjornson.com/2019/08/11/fast-float-summation.html
  4104. FORCE_INLINE void _sse2neon_kadd_f32(float *sum, float *c, float y)
  4105. {
  4106. y -= *c;
  4107. float t = *sum + y;
  4108. *c = (t - *sum) - y;
  4109. *sum = t;
  4110. }
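// Illustrative sketch (not part of sse2neon): how the Kahan helper above is
// meant to be used. Carry the running compensation `c` across every addition,
// then fold it in once at the end (as _mm_dp_ps below does). The helper name
// is hypothetical.
FORCE_INLINE float sse2neon_example_kahan_sum(const float *data, int n)
{
    float sum = 0.0f, c = 0.0f;
    for (int i = 0; i < n; i++)
        _sse2neon_kadd_f32(&sum, &c, data[i]);
    return sum + c;
}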
  4111. // Conditionally multiply the packed single-precision (32-bit) floating-point
  4112. // elements in a and b using the high 4 bits in imm8, sum the four products,
  4113. // and conditionally store the sum in dst using the low 4 bits of imm.
  4114. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dp_ps
  4115. FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm)
  4116. {
  4117. #if defined(__aarch64__)
  4118. /* shortcuts */
  4119. if (imm == 0xFF) {
  4120. return _mm_set1_ps(vaddvq_f32(_mm_mul_ps(a, b)));
  4121. }
  4122. if (imm == 0x7F) {
  4123. float32x4_t m = _mm_mul_ps(a, b);
  4124. m[3] = 0;
  4125. return _mm_set1_ps(vaddvq_f32(m));
  4126. }
  4127. #endif
  4128. float s = 0, c = 0;
  4129. float32x4_t f32a = vreinterpretq_f32_m128(a);
  4130. float32x4_t f32b = vreinterpretq_f32_m128(b);
  4131. /* To improve the accuracy of floating-point summation, Kahan algorithm
  4132. * is used for each operation.
  4133. */
  4134. if (imm & (1 << 4))
  4135. _sse2neon_kadd_f32(&s, &c, f32a[0] * f32b[0]);
  4136. if (imm & (1 << 5))
  4137. _sse2neon_kadd_f32(&s, &c, f32a[1] * f32b[1]);
  4138. if (imm & (1 << 6))
  4139. _sse2neon_kadd_f32(&s, &c, f32a[2] * f32b[2]);
  4140. if (imm & (1 << 7))
  4141. _sse2neon_kadd_f32(&s, &c, f32a[3] * f32b[3]);
  4142. s += c;
  4143. float32x4_t res = {
  4144. (imm & 0x1) ? s : 0,
  4145. (imm & 0x2) ? s : 0,
  4146. (imm & 0x4) ? s : 0,
  4147. (imm & 0x8) ? s : 0,
  4148. };
  4149. return vreinterpretq_m128_f32(res);
  4150. }
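// Illustrative sketch (not part of sse2neon): a scalar model of the imm8
// semantics of _mm_dp_ps. Bits 4-7 select which products enter the sum; bits
// 0-3 select which output lanes receive it. The helper name is hypothetical.
FORCE_INLINE void sse2neon_example_dp_ps_model(const float a[4],
                                               const float b[4],
                                               int imm8,
                                               float dst[4])
{
    float sum = 0.0f;
    for (int i = 0; i < 4; i++)
        if (imm8 & (1 << (i + 4)))
            sum += a[i] * b[i];
    for (int i = 0; i < 4; i++)
        dst[i] = (imm8 & (1 << i)) ? sum : 0.0f;
}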
  4151. /* Compare operations */
  4152. // Compares for less than
  4153. // https://msdn.microsoft.com/en-us/library/vstudio/f330yhc8(v=vs.100).aspx
  4154. FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b)
  4155. {
  4156. return vreinterpretq_m128_u32(
  4157. vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  4158. }
  4159. // Compares for less than
  4160. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fy94wye7(v=vs.100)
  4161. FORCE_INLINE __m128 _mm_cmplt_ss(__m128 a, __m128 b)
  4162. {
  4163. return _mm_move_ss(a, _mm_cmplt_ps(a, b));
  4164. }
  4165. // Compares for greater than.
  4166. //
  4167. // r0 := (a0 > b0) ? 0xffffffff : 0x0
  4168. // r1 := (a1 > b1) ? 0xffffffff : 0x0
  4169. // r2 := (a2 > b2) ? 0xffffffff : 0x0
  4170. // r3 := (a3 > b3) ? 0xffffffff : 0x0
  4171. //
  4172. // https://msdn.microsoft.com/en-us/library/vstudio/11dy102s(v=vs.100).aspx
  4173. FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b)
  4174. {
  4175. return vreinterpretq_m128_u32(
  4176. vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  4177. }
  4178. // Compares for greater than.
  4179. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/1xyyyy9e(v=vs.100)
  4180. FORCE_INLINE __m128 _mm_cmpgt_ss(__m128 a, __m128 b)
  4181. {
  4182. return _mm_move_ss(a, _mm_cmpgt_ps(a, b));
  4183. }
  4184. // Compares for greater than or equal.
  4185. // https://msdn.microsoft.com/en-us/library/vstudio/fs813y2t(v=vs.100).aspx
  4186. FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b)
  4187. {
  4188. return vreinterpretq_m128_u32(
  4189. vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  4190. }
  4191. // Compares for greater than or equal.
  4192. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/kesh3ddc(v=vs.100)
  4193. FORCE_INLINE __m128 _mm_cmpge_ss(__m128 a, __m128 b)
  4194. {
  4195. return _mm_move_ss(a, _mm_cmpge_ps(a, b));
  4196. }
  4197. // Compares for less than or equal.
  4198. //
  4199. // r0 := (a0 <= b0) ? 0xffffffff : 0x0
  4200. // r1 := (a1 <= b1) ? 0xffffffff : 0x0
  4201. // r2 := (a2 <= b2) ? 0xffffffff : 0x0
  4202. // r3 := (a3 <= b3) ? 0xffffffff : 0x0
  4203. //
  4204. // https://msdn.microsoft.com/en-us/library/vstudio/1s75w83z(v=vs.100).aspx
  4205. FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b)
  4206. {
  4207. return vreinterpretq_m128_u32(
  4208. vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  4209. }
  4210. // Compares for less than or equal.
  4211. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/a7x0hbhw(v=vs.100)
  4212. FORCE_INLINE __m128 _mm_cmple_ss(__m128 a, __m128 b)
  4213. {
  4214. return _mm_move_ss(a, _mm_cmple_ps(a, b));
  4215. }
  4216. // Compares for equality.
  4217. // https://msdn.microsoft.com/en-us/library/vstudio/36aectz5(v=vs.100).aspx
  4218. FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b)
  4219. {
  4220. return vreinterpretq_m128_u32(
  4221. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  4222. }
  4223. // Compares for equality.
  4224. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/k423z28e(v=vs.100)
  4225. FORCE_INLINE __m128 _mm_cmpeq_ss(__m128 a, __m128 b)
  4226. {
  4227. return _mm_move_ss(a, _mm_cmpeq_ps(a, b));
  4228. }
  4229. // Compares for inequality.
  4230. // https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx
  4231. FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
  4232. {
  4233. return vreinterpretq_m128_u32(vmvnq_u32(
  4234. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
  4235. }
  4236. // Compares for inequality.
  4237. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/ekya8fh4(v=vs.100)
  4238. FORCE_INLINE __m128 _mm_cmpneq_ss(__m128 a, __m128 b)
  4239. {
  4240. return _mm_move_ss(a, _mm_cmpneq_ps(a, b));
  4241. }
  4242. // Compares for not greater than or equal.
  4243. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/wsexys62(v=vs.100)
  4244. FORCE_INLINE __m128 _mm_cmpnge_ps(__m128 a, __m128 b)
  4245. {
  4246. return _mm_cmplt_ps(a, b);
  4247. }
  4248. // Compares for not greater than or equal.
  4249. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fk2y80s8(v=vs.100)
  4250. FORCE_INLINE __m128 _mm_cmpnge_ss(__m128 a, __m128 b)
  4251. {
  4252. return _mm_cmplt_ss(a, b);
  4253. }
  4254. // Compares for not greater than.
  4255. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/d0xh7w0s(v=vs.100)
  4256. FORCE_INLINE __m128 _mm_cmpngt_ps(__m128 a, __m128 b)
  4257. {
  4258. return _mm_cmple_ps(a, b);
  4259. }
  4260. // Compares for not greater than.
  4261. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100)
  4262. FORCE_INLINE __m128 _mm_cmpngt_ss(__m128 a, __m128 b)
  4263. {
  4264. return _mm_cmple_ss(a, b);
  4265. }
  4266. // Compares for not less than or equal.
  4267. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/6a330kxw(v=vs.100)
  4268. FORCE_INLINE __m128 _mm_cmpnle_ps(__m128 a, __m128 b)
  4269. {
  4270. return _mm_cmpgt_ps(a, b);
  4271. }
  4272. // Compares for not less than or equal.
  4273. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100)
  4274. FORCE_INLINE __m128 _mm_cmpnle_ss(__m128 a, __m128 b)
  4275. {
  4276. return _mm_cmpgt_ss(a, b);
  4277. }
  4278. // Compares for not less than.
  4279. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/4686bbdw(v=vs.100)
  4280. FORCE_INLINE __m128 _mm_cmpnlt_ps(__m128 a, __m128 b)
  4281. {
  4282. return _mm_cmpge_ps(a, b);
  4283. }
  4284. // Compares for not less than.
  4285. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/56b9z2wf(v=vs.100)
  4286. FORCE_INLINE __m128 _mm_cmpnlt_ss(__m128 a, __m128 b)
  4287. {
  4288. return _mm_cmpge_ss(a, b);
  4289. }
  4290. // Compares the 16 signed or unsigned 8-bit integers in a and the 16 signed or
  4291. // unsigned 8-bit integers in b for equality.
  4292. // https://msdn.microsoft.com/en-us/library/windows/desktop/bz5xk21a(v=vs.90).aspx
  4293. FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b)
  4294. {
  4295. return vreinterpretq_m128i_u8(
  4296. vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  4297. }
  4298. // Compare packed double-precision (64-bit) floating-point elements in a and b
  4299. // for equality, and store the results in dst.
  4300. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_pd
  4301. FORCE_INLINE __m128d _mm_cmpeq_pd(__m128d a, __m128d b)
  4302. {
  4303. #if defined(__aarch64__)
  4304. return vreinterpretq_m128d_u64(
  4305. vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  4306. #else
  4307. // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
  4308. uint32x4_t cmp =
  4309. vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
  4310. uint32x4_t swapped = vrev64q_u32(cmp);
  4311. return vreinterpretq_m128d_u32(vandq_u32(cmp, swapped));
  4312. #endif
  4313. }
  4314. // Compares the 8 signed or unsigned 16-bit integers in a and the 8 signed or
  4315. // unsigned 16-bit integers in b for equality.
  4316. // https://msdn.microsoft.com/en-us/library/2ay060te(v=vs.100).aspx
  4317. FORCE_INLINE __m128i _mm_cmpeq_epi16(__m128i a, __m128i b)
  4318. {
  4319. return vreinterpretq_m128i_u16(
  4320. vceqq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  4321. }
  4322. // Compare packed 32-bit integers in a and b for equality, and store the results
  4323. // in dst
  4324. FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
  4325. {
  4326. return vreinterpretq_m128i_u32(
  4327. vceqq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  4328. }
  4329. // Compare packed 64-bit integers in a and b for equality, and store the results
  4330. // in dst
  4331. FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b)
  4332. {
  4333. #if defined(__aarch64__)
  4334. return vreinterpretq_m128i_u64(
  4335. vceqq_u64(vreinterpretq_u64_m128i(a), vreinterpretq_u64_m128i(b)));
  4336. #else
  4337. // ARMv7 lacks vceqq_u64
  4338. // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
  4339. uint32x4_t cmp =
  4340. vceqq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b));
  4341. uint32x4_t swapped = vrev64q_u32(cmp);
  4342. return vreinterpretq_m128i_u32(vandq_u32(cmp, swapped));
  4343. #endif
  4344. }
  4345. // Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers
  4346. // in b for lesser than.
  4347. // https://msdn.microsoft.com/en-us/library/windows/desktop/9s46csht(v=vs.90).aspx
  4348. FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b)
  4349. {
  4350. return vreinterpretq_m128i_u8(
  4351. vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  4352. }
  4353. // Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers
  4354. // in b for greater than.
  4355. //
  4356. // r0 := (a0 > b0) ? 0xff : 0x0
  4357. // r1 := (a1 > b1) ? 0xff : 0x0
  4358. // ...
  4359. // r15 := (a15 > b15) ? 0xff : 0x0
  4360. //
  4361. // https://msdn.microsoft.com/zh-tw/library/wf45zt2b(v=vs.100).aspx
  4362. FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b)
  4363. {
  4364. return vreinterpretq_m128i_u8(
  4365. vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  4366. }
  4367. // Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers
  4368. // in b for less than.
  4369. //
  4370. // r0 := (a0 < b0) ? 0xffff : 0x0
  4371. // r1 := (a1 < b1) ? 0xffff : 0x0
  4372. // ...
  4373. // r7 := (a7 < b7) ? 0xffff : 0x0
  4374. //
  4375. // https://technet.microsoft.com/en-us/library/t863edb2(v=vs.100).aspx
  4376. FORCE_INLINE __m128i _mm_cmplt_epi16(__m128i a, __m128i b)
  4377. {
  4378. return vreinterpretq_m128i_u16(
  4379. vcltq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  4380. }
  4381. // Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers
  4382. // in b for greater than.
  4383. //
  4384. // r0 := (a0 > b0) ? 0xffff : 0x0
  4385. // r1 := (a1 > b1) ? 0xffff : 0x0
  4386. // ...
  4387. // r7 := (a7 > b7) ? 0xffff : 0x0
  4388. //
  4389. // https://technet.microsoft.com/en-us/library/xd43yfsa(v=vs.100).aspx
  4390. FORCE_INLINE __m128i _mm_cmpgt_epi16(__m128i a, __m128i b)
  4391. {
  4392. return vreinterpretq_m128i_u16(
  4393. vcgtq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  4394. }
  4395. // Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers
  4396. // in b for less than.
  4397. // https://msdn.microsoft.com/en-us/library/vstudio/4ak0bf5d(v=vs.100).aspx
  4398. FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b)
  4399. {
  4400. return vreinterpretq_m128i_u32(
  4401. vcltq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  4402. }
  4403. // Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers
  4404. // in b for greater than.
  4405. // https://msdn.microsoft.com/en-us/library/vstudio/1s9f2z0y(v=vs.100).aspx
  4406. FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
  4407. {
  4408. return vreinterpretq_m128i_u32(
  4409. vcgtq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  4410. }
  4411. // Compares the 2 signed 64-bit integers in a and the 2 signed 64-bit integers
  4412. // in b for greater than.
  4413. FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b)
  4414. {
  4415. #if defined(__aarch64__)
  4416. return vreinterpretq_m128i_u64(
  4417. vcgtq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
  4418. #else
  4419. // ARMv7 lacks vcgtq_s64.
  4420. // This is based off of Clang's SSE2 polyfill:
  4421. // (a > b) -> ((a_hi > b_hi) || (a_lo > b_lo && a_hi == b_hi))
  4422. // Mask the sign bit out since we need a signed AND an unsigned comparison
  4423. // and it is ugly to try and split them.
  4424. int32x4_t mask = vreinterpretq_s32_s64(vdupq_n_s64(0x80000000ull));
  4425. int32x4_t a_mask = veorq_s32(vreinterpretq_s32_m128i(a), mask);
  4426. int32x4_t b_mask = veorq_s32(vreinterpretq_s32_m128i(b), mask);
  4427. // Check if a > b
  4428. int64x2_t greater = vreinterpretq_s64_u32(vcgtq_s32(a_mask, b_mask));
  4429. // Copy upper mask to lower mask
  4430. // a_hi > b_hi
  4431. int64x2_t gt_hi = vshrq_n_s64(greater, 63);
  4432. // Copy lower mask to upper mask
  4433. // a_lo > b_lo
  4434. int64x2_t gt_lo = vsliq_n_s64(greater, greater, 32);
  4435. // Compare for equality
  4436. int64x2_t equal = vreinterpretq_s64_u32(vceqq_s32(a_mask, b_mask));
  4437. // Copy upper mask to lower mask
  4438. // a_hi == b_hi
  4439. int64x2_t eq_hi = vshrq_n_s64(equal, 63);
  4440. // a_hi > b_hi || (a_lo > b_lo && a_hi == b_hi)
  4441. int64x2_t ret = vorrq_s64(gt_hi, vandq_s64(gt_lo, eq_hi));
  4442. return vreinterpretq_m128i_s64(ret);
  4443. #endif
  4444. }
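// Illustrative sketch (not part of sse2neon): the scalar predicate that the
// ARMv7 fallback above assembles from 32-bit halves:
// (a > b) == (a_hi > b_hi) || (a_hi == b_hi && a_lo > b_lo), with the high
// halves compared as signed and the low halves as unsigned. The helper name
// is hypothetical.
FORCE_INLINE int sse2neon_example_cmpgt_i64_model(int64_t a, int64_t b)
{
    int32_t a_hi = (int32_t) (a >> 32), b_hi = (int32_t) (b >> 32);
    uint32_t a_lo = (uint32_t) a, b_lo = (uint32_t) b;
    return (a_hi > b_hi) || (a_hi == b_hi && a_lo > b_lo);
}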
  4445. // Compares the four 32-bit floats in a and b to check if any values are NaN.
  4446. // Ordered compare between each value returns true for "orderable" and false for
  4447. // "not orderable" (NaN).
  4448. // https://msdn.microsoft.com/en-us/library/vstudio/0h9w00fx(v=vs.100).aspx see
  4449. // also:
  4450. // http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
  4451. // http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics
  4452. FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b)
  4453. {
  4454. // Note: NEON does not have ordered compare builtin
  4455. // Need to compare a eq a and b eq b to check for NaN
  4456. // Do AND of results to get final
  4457. uint32x4_t ceqaa =
  4458. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  4459. uint32x4_t ceqbb =
  4460. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  4461. return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb));
  4462. }
  4463. // Compares for ordered.
  4464. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/343t62da(v=vs.100)
  4465. FORCE_INLINE __m128 _mm_cmpord_ss(__m128 a, __m128 b)
  4466. {
  4467. return _mm_move_ss(a, _mm_cmpord_ps(a, b));
  4468. }
  4469. // Compares for unordered.
  4470. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/khy6fk1t(v=vs.100)
  4471. FORCE_INLINE __m128 _mm_cmpunord_ps(__m128 a, __m128 b)
  4472. {
  4473. uint32x4_t f32a =
  4474. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  4475. uint32x4_t f32b =
  4476. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  4477. return vreinterpretq_m128_u32(vmvnq_u32(vandq_u32(f32a, f32b)));
  4478. }
  4479. // Compares for unordered.
  4480. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/2as2387b(v=vs.100)
  4481. FORCE_INLINE __m128 _mm_cmpunord_ss(__m128 a, __m128 b)
  4482. {
  4483. return _mm_move_ss(a, _mm_cmpunord_ps(a, b));
  4484. }
  4485. // Compares the lower single-precision floating point scalar values of a and b
  4486. // using a less than operation. :
  4487. // https://msdn.microsoft.com/en-us/library/2kwe606b(v=vs.90).aspx Important
  4488. // note!! The documentation on MSDN is incorrect! If either of the values is a
  4489. // NAN the docs say you will get a one, but in fact, it will return a zero!!
  4490. FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b)
  4491. {
  4492. uint32x4_t a_not_nan =
  4493. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  4494. uint32x4_t b_not_nan =
  4495. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  4496. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  4497. uint32x4_t a_lt_b =
  4498. vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  4499. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_lt_b), 0) != 0) ? 1 : 0;
  4500. }
  4501. // Compares the lower single-precision floating point scalar values of a and b
  4502. // using a greater than operation. :
  4503. // https://msdn.microsoft.com/en-us/library/b0738e0t(v=vs.100).aspx
  4504. FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b)
  4505. {
  4506. // return vgetq_lane_u32(vcgtq_f32(vreinterpretq_f32_m128(a),
  4507. // vreinterpretq_f32_m128(b)), 0);
  4508. uint32x4_t a_not_nan =
  4509. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  4510. uint32x4_t b_not_nan =
  4511. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  4512. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  4513. uint32x4_t a_gt_b =
  4514. vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  4515. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0) ? 1 : 0;
  4516. }
  4517. // Compares the lower single-precision floating point scalar values of a and b
  4518. // using a less than or equal operation. :
  4519. // https://msdn.microsoft.com/en-us/library/1w4t7c57(v=vs.90).aspx
  4520. FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b)
  4521. {
  4522. // return vgetq_lane_u32(vcleq_f32(vreinterpretq_f32_m128(a),
  4523. // vreinterpretq_f32_m128(b)), 0);
  4524. uint32x4_t a_not_nan =
  4525. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  4526. uint32x4_t b_not_nan =
  4527. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  4528. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  4529. uint32x4_t a_le_b =
  4530. vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  4531. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_le_b), 0) != 0) ? 1 : 0;
  4532. }
  4533. // Compares the lower single-precision floating point scalar values of a and b
  4534. // using a greater than or equal operation. :
  4535. // https://msdn.microsoft.com/en-us/library/8t80des6(v=vs.100).aspx
  4536. FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b)
  4537. {
  4538. // return vgetq_lane_u32(vcgeq_f32(vreinterpretq_f32_m128(a),
  4539. // vreinterpretq_f32_m128(b)), 0);
  4540. uint32x4_t a_not_nan =
  4541. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  4542. uint32x4_t b_not_nan =
  4543. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  4544. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  4545. uint32x4_t a_ge_b =
  4546. vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  4547. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0) ? 1 : 0;
  4548. }
  4549. // Compares the lower single-precision floating point scalar values of a and b
  4550. // using an equality operation. :
  4551. // https://msdn.microsoft.com/en-us/library/93yx2h2b(v=vs.100).aspx
  4552. FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b)
  4553. {
  4554. // return vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a),
  4555. // vreinterpretq_f32_m128(b)), 0);
  4556. uint32x4_t a_not_nan =
  4557. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  4558. uint32x4_t b_not_nan =
  4559. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  4560. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  4561. uint32x4_t a_eq_b =
  4562. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  4563. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_eq_b), 0) != 0) ? 1 : 0;
  4564. }
  4565. // Compares the lower single-precision floating point scalar values of a and b
  4566. // using an inequality operation. :
  4567. // https://msdn.microsoft.com/en-us/library/bafh5e0a(v=vs.90).aspx
  4568. FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b)
  4569. {
  4570. // return !vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a),
  4571. // vreinterpretq_f32_m128(b)), 0);
  4572. uint32x4_t a_not_nan =
  4573. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  4574. uint32x4_t b_not_nan =
  4575. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  4576. uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
  4577. uint32x4_t a_neq_b = vmvnq_u32(
  4578. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  4579. return (vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_neq_b), 0) != 0) ? 1 : 0;
  4580. }
// According to the documentation, these intrinsics behave the same as the
// non-'u' versions, so we simply alias them here.
  4583. #define _mm_ucomieq_ss _mm_comieq_ss
  4584. #define _mm_ucomige_ss _mm_comige_ss
  4585. #define _mm_ucomigt_ss _mm_comigt_ss
  4586. #define _mm_ucomile_ss _mm_comile_ss
  4587. #define _mm_ucomilt_ss _mm_comilt_ss
  4588. #define _mm_ucomineq_ss _mm_comineq_ss
  4589. /* Conversions */
  4590. // Convert packed signed 32-bit integers in b to packed single-precision
  4591. // (32-bit) floating-point elements, store the results in the lower 2 elements
  4592. // of dst, and copy the upper 2 packed elements from a to the upper elements of
  4593. // dst.
  4594. //
  4595. // dst[31:0] := Convert_Int32_To_FP32(b[31:0])
  4596. // dst[63:32] := Convert_Int32_To_FP32(b[63:32])
  4597. // dst[95:64] := a[95:64]
  4598. // dst[127:96] := a[127:96]
  4599. //
  4600. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_pi2ps
  4601. FORCE_INLINE __m128 _mm_cvt_pi2ps(__m128 a, __m64 b)
  4602. {
  4603. return vreinterpretq_m128_f32(
  4604. vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
  4605. vget_high_f32(vreinterpretq_f32_m128(a))));
  4606. }
  4607. // Convert the signed 32-bit integer b to a single-precision (32-bit)
  4608. // floating-point element, store the result in the lower element of dst, and
  4609. // copy the upper 3 packed elements from a to the upper elements of dst.
  4610. //
  4611. // dst[31:0] := Convert_Int32_To_FP32(b[31:0])
  4612. // dst[127:32] := a[127:32]
  4613. //
  4614. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_si2ss
  4615. FORCE_INLINE __m128 _mm_cvt_si2ss(__m128 a, int b)
  4616. {
  4617. return vreinterpretq_m128_f32(
  4618. vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
  4619. }
  4620. // Convert the signed 32-bit integer b to a single-precision (32-bit)
  4621. // floating-point element, store the result in the lower element of dst, and
  4622. // copy the upper 3 packed elements from a to the upper elements of dst.
  4623. //
  4624. // dst[31:0] := Convert_Int32_To_FP32(b[31:0])
  4625. // dst[127:32] := a[127:32]
  4626. //
  4627. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi32_ss
  4628. #define _mm_cvtsi32_ss(a, b) _mm_cvt_si2ss(a, b)
  4629. // Convert the signed 64-bit integer b to a single-precision (32-bit)
  4630. // floating-point element, store the result in the lower element of dst, and
  4631. // copy the upper 3 packed elements from a to the upper elements of dst.
  4632. //
  4633. // dst[31:0] := Convert_Int64_To_FP32(b[63:0])
  4634. // dst[127:32] := a[127:32]
  4635. //
  4636. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64_ss
  4637. FORCE_INLINE __m128 _mm_cvtsi64_ss(__m128 a, int64_t b)
  4638. {
  4639. return vreinterpretq_m128_f32(
  4640. vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
  4641. }
  4642. // Convert the lower single-precision (32-bit) floating-point element in a to a
  4643. // 32-bit integer, and store the result in dst.
  4644. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_ss2si
  4645. FORCE_INLINE int _mm_cvt_ss2si(__m128 a)
  4646. {
  4647. #if defined(__aarch64__)
  4648. return vgetq_lane_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a)), 0);
  4649. #else
  4650. float32_t data = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  4651. float32_t diff = data - floor(data);
  4652. if (diff > 0.5)
  4653. return (int32_t) ceil(data);
  4654. if (unlikely(diff == 0.5)) {
  4655. int32_t f = (int32_t) floor(data);
  4656. int32_t c = (int32_t) ceil(data);
  4657. return c & 1 ? f : c;
  4658. }
  4659. return (int32_t) floor(data);
  4660. #endif
  4661. }
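// Worked example (illustrative only): both paths above implement the SSE
// default round-to-nearest-even, so
//
//   _mm_cvt_ss2si(_mm_set_ss(2.5f));   /* -> 2  (tie rounds to even) */
//   _mm_cvt_ss2si(_mm_set_ss(3.5f));   /* -> 4                       */
//   _mm_cvt_ss2si(_mm_set_ss(-2.5f));  /* -> -2                      */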
  4662. // Convert packed 16-bit integers in a to packed single-precision (32-bit)
  4663. // floating-point elements, and store the results in dst.
  4664. //
  4665. // FOR j := 0 to 3
  4666. // i := j*16
  4667. // m := j*32
  4668. // dst[m+31:m] := Convert_Int16_To_FP32(a[i+15:i])
  4669. // ENDFOR
  4670. //
  4671. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi16_ps
  4672. FORCE_INLINE __m128 _mm_cvtpi16_ps(__m64 a)
  4673. {
  4674. return vreinterpretq_m128_f32(
  4675. vcvtq_f32_s32(vmovl_s16(vreinterpret_s16_m64(a))));
  4676. }
  4677. // Convert packed 32-bit integers in b to packed single-precision (32-bit)
  4678. // floating-point elements, store the results in the lower 2 elements of dst,
  4679. // and copy the upper 2 packed elements from a to the upper elements of dst.
  4680. //
  4681. // dst[31:0] := Convert_Int32_To_FP32(b[31:0])
  4682. // dst[63:32] := Convert_Int32_To_FP32(b[63:32])
  4683. // dst[95:64] := a[95:64]
  4684. // dst[127:96] := a[127:96]
  4685. //
  4686. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi32_ps
  4687. FORCE_INLINE __m128 _mm_cvtpi32_ps(__m128 a, __m64 b)
  4688. {
  4689. return vreinterpretq_m128_f32(
  4690. vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
  4691. vget_high_f32(vreinterpretq_f32_m128(a))));
  4692. }
  4693. // Convert packed signed 32-bit integers in a to packed single-precision
  4694. // (32-bit) floating-point elements, store the results in the lower 2 elements
// of dst, then convert the packed signed 32-bit integers in b to
// single-precision (32-bit) floating-point elements, and store the results in
// the upper 2 elements of dst.
  4698. //
  4699. // dst[31:0] := Convert_Int32_To_FP32(a[31:0])
  4700. // dst[63:32] := Convert_Int32_To_FP32(a[63:32])
  4701. // dst[95:64] := Convert_Int32_To_FP32(b[31:0])
  4702. // dst[127:96] := Convert_Int32_To_FP32(b[63:32])
  4703. //
  4704. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi32x2_ps
  4705. FORCE_INLINE __m128 _mm_cvtpi32x2_ps(__m64 a, __m64 b)
  4706. {
  4707. return vreinterpretq_m128_f32(vcvtq_f32_s32(
  4708. vcombine_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b))));
  4709. }
  4710. // Convert the lower packed 8-bit integers in a to packed single-precision
  4711. // (32-bit) floating-point elements, and store the results in dst.
  4712. //
  4713. // FOR j := 0 to 3
  4714. // i := j*8
  4715. // m := j*32
  4716. // dst[m+31:m] := Convert_Int8_To_FP32(a[i+7:i])
  4717. // ENDFOR
  4718. //
  4719. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi8_ps
  4720. FORCE_INLINE __m128 _mm_cvtpi8_ps(__m64 a)
  4721. {
  4722. return vreinterpretq_m128_f32(vcvtq_f32_s32(
  4723. vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_m64(a))))));
  4724. }
  4725. // Convert packed unsigned 16-bit integers in a to packed single-precision
  4726. // (32-bit) floating-point elements, and store the results in dst.
  4727. //
  4728. // FOR j := 0 to 3
  4729. // i := j*16
  4730. // m := j*32
  4731. // dst[m+31:m] := Convert_UInt16_To_FP32(a[i+15:i])
  4732. // ENDFOR
  4733. //
  4734. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpu16_ps
  4735. FORCE_INLINE __m128 _mm_cvtpu16_ps(__m64 a)
  4736. {
  4737. return vreinterpretq_m128_f32(
  4738. vcvtq_f32_u32(vmovl_u16(vreinterpret_u16_m64(a))));
  4739. }
  4740. // Convert the lower packed unsigned 8-bit integers in a to packed
  4741. // single-precision (32-bit) floating-point elements, and store the results in
  4742. // dst.
  4743. //
  4744. // FOR j := 0 to 3
  4745. // i := j*8
  4746. // m := j*32
  4747. // dst[m+31:m] := Convert_UInt8_To_FP32(a[i+7:i])
  4748. // ENDFOR
  4749. //
  4750. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpu8_ps
  4751. FORCE_INLINE __m128 _mm_cvtpu8_ps(__m64 a)
  4752. {
  4753. return vreinterpretq_m128_f32(vcvtq_f32_u32(
  4754. vmovl_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_m64(a))))));
  4755. }
  4756. // Converts the four single-precision, floating-point values of a to signed
  4757. // 32-bit integer values using truncate.
  4758. // https://msdn.microsoft.com/en-us/library/vstudio/1h005y6x(v=vs.100).aspx
  4759. FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a)
  4760. {
  4761. return vreinterpretq_m128i_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)));
  4762. }
  4763. // Convert the lower double-precision (64-bit) floating-point element in a to a
  4764. // 64-bit integer with truncation, and store the result in dst.
  4765. //
  4766. // dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
  4767. //
  4768. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64
  4769. FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a)
  4770. {
  4771. #if defined(__aarch64__)
  4772. return vgetq_lane_s64(vcvtq_s64_f64(vreinterpretq_f64_m128d(a)), 0);
  4773. #else
  4774. double ret = *((double *) &a);
  4775. return (int64_t) ret;
  4776. #endif
  4777. }
  4778. // Convert the lower double-precision (64-bit) floating-point element in a to a
  4779. // 64-bit integer with truncation, and store the result in dst.
  4780. //
  4781. // dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
  4782. //
  4783. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64x
  4784. #define _mm_cvttsd_si64x(a) _mm_cvttsd_si64(a)
  4785. // Converts the four signed 32-bit integer values of a to single-precision,
  4786. // floating-point values
  4787. // https://msdn.microsoft.com/en-us/library/vstudio/36bwxcx5(v=vs.100).aspx
  4788. FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a)
  4789. {
  4790. return vreinterpretq_m128_f32(vcvtq_f32_s32(vreinterpretq_s32_m128i(a)));
  4791. }
// Converts the eight unsigned 8-bit integers in the lower 64 bits to eight
// unsigned 16-bit integers.
  4794. FORCE_INLINE __m128i _mm_cvtepu8_epi16(__m128i a)
  4795. {
uint8x16_t u8x16 = vreinterpretq_u8_m128i(a);    /* xxxx xxxx HGFE DCBA */
uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0H0G 0F0E 0D0C 0B0A */
  4798. return vreinterpretq_m128i_u16(u16x8);
  4799. }
  4800. // Converts the four unsigned 8-bit integers in the lower 32 bits to four
  4801. // unsigned 32-bit integers.
  4802. // https://msdn.microsoft.com/en-us/library/bb531467%28v=vs.100%29.aspx
  4803. FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a)
  4804. {
  4805. uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */
  4806. uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */
  4807. uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */
  4808. return vreinterpretq_m128i_u32(u32x4);
  4809. }
  4810. // Converts the two unsigned 8-bit integers in the lower 16 bits to two
  4811. // unsigned 64-bit integers.
  4812. FORCE_INLINE __m128i _mm_cvtepu8_epi64(__m128i a)
  4813. {
  4814. uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx xxBA */
  4815. uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0x0x 0B0A */
  4816. uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
  4817. uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
  4818. return vreinterpretq_m128i_u64(u64x2);
  4819. }
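// Illustrative sketch of the zero-extension chain above (assuming the
// _mm_set_epi8 helper defined elsewhere in this header):
//
//   __m128i v = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
//                            0, 0, 0, 0, 0, 0, 2, (char) 0xFF);
//   __m128i w = _mm_cvtepu8_epi64(v);
//   /* 64-bit lanes of w (low, high): 255, 2 -- no sign extension */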
// Converts the eight signed 8-bit integers in the lower 64 bits to eight
// signed 16-bit integers.
  4822. FORCE_INLINE __m128i _mm_cvtepi8_epi16(__m128i a)
  4823. {
int8x16_t s8x16 = vreinterpretq_s8_m128i(a);     /* xxxx xxxx HGFE DCBA */
int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16));  /* 0H0G 0F0E 0D0C 0B0A */
  4826. return vreinterpretq_m128i_s16(s16x8);
  4827. }
// Converts the four signed 8-bit integers in the lower 32 bits to four
// signed 32-bit integers.
  4830. FORCE_INLINE __m128i _mm_cvtepi8_epi32(__m128i a)
  4831. {
  4832. int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
  4833. int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
  4834. int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */
  4835. return vreinterpretq_m128i_s32(s32x4);
  4836. }
// Converts the two signed 8-bit integers in the lower 16 bits to two
// signed 64-bit integers.
  4839. FORCE_INLINE __m128i _mm_cvtepi8_epi64(__m128i a)
  4840. {
  4841. int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx xxBA */
  4842. int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0x0x 0B0A */
  4843. int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
  4844. int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
  4845. return vreinterpretq_m128i_s64(s64x2);
  4846. }
  4847. // Converts the four signed 16-bit integers in the lower 64 bits to four signed
  4848. // 32-bit integers.
  4849. FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a)
  4850. {
  4851. return vreinterpretq_m128i_s32(
  4852. vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a))));
  4853. }
// Converts the two signed 16-bit integers in the lower 32 bits to two signed
// 64-bit integers.
  4856. FORCE_INLINE __m128i _mm_cvtepi16_epi64(__m128i a)
  4857. {
  4858. int16x8_t s16x8 = vreinterpretq_s16_m128i(a); /* xxxx xxxx xxxx 0B0A */
  4859. int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
  4860. int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
  4861. return vreinterpretq_m128i_s64(s64x2);
  4862. }
  4863. // Converts the four unsigned 16-bit integers in the lower 64 bits to four
  4864. // unsigned 32-bit integers.
  4865. FORCE_INLINE __m128i _mm_cvtepu16_epi32(__m128i a)
  4866. {
  4867. return vreinterpretq_m128i_u32(
  4868. vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a))));
  4869. }
  4870. // Converts the two unsigned 16-bit integers in the lower 32 bits to two
  4871. // unsigned 64-bit integers.
  4872. FORCE_INLINE __m128i _mm_cvtepu16_epi64(__m128i a)
  4873. {
  4874. uint16x8_t u16x8 = vreinterpretq_u16_m128i(a); /* xxxx xxxx xxxx 0B0A */
  4875. uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
  4876. uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
  4877. return vreinterpretq_m128i_u64(u64x2);
  4878. }
  4879. // Converts the two unsigned 32-bit integers in the lower 64 bits to two
  4880. // unsigned 64-bit integers.
  4881. FORCE_INLINE __m128i _mm_cvtepu32_epi64(__m128i a)
  4882. {
  4883. return vreinterpretq_m128i_u64(
  4884. vmovl_u32(vget_low_u32(vreinterpretq_u32_m128i(a))));
  4885. }
  4886. // Converts the two signed 32-bit integers in the lower 64 bits to two signed
  4887. // 64-bit integers.
  4888. FORCE_INLINE __m128i _mm_cvtepi32_epi64(__m128i a)
  4889. {
  4890. return vreinterpretq_m128i_s64(
  4891. vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a))));
  4892. }
  4893. // Converts the four single-precision, floating-point values of a to signed
  4894. // 32-bit integer values.
  4895. //
  4896. // r0 := (int) a0
  4897. // r1 := (int) a1
  4898. // r2 := (int) a2
  4899. // r3 := (int) a3
  4900. //
  4901. // https://msdn.microsoft.com/en-us/library/vstudio/xdc42k5e(v=vs.100).aspx
  4902. // *NOTE*. The default rounding mode on SSE is 'round to even', which ARMv7-A
  4903. // does not support! It is supported on ARMv8-A however.
  4904. FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a)
  4905. {
  4906. #if defined(__aarch64__)
return vreinterpretq_m128i_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a)));
  4908. #else
  4909. uint32x4_t signmask = vdupq_n_u32(0x80000000);
  4910. float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
  4911. vdupq_n_f32(0.5f)); /* +/- 0.5 */
  4912. int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
  4913. vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
  4914. int32x4_t r_trunc =
  4915. vcvtq_s32_f32(vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
  4916. int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
  4917. vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
  4918. int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
  4919. vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
  4920. float32x4_t delta = vsubq_f32(
  4921. vreinterpretq_f32_m128(a),
  4922. vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
  4923. uint32x4_t is_delta_half = vceqq_f32(delta, half); /* delta == +/- 0.5 */
  4924. return vreinterpretq_m128i_s32(vbslq_s32(is_delta_half, r_even, r_normal));
  4925. #endif
  4926. }
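// Worked example (illustrative), with the default rounding mode:
//
//   __m128 v  = _mm_set_ps(3.5f, 2.5f, -1.25f, 0.75f);
//   __m128i r = _mm_cvtps_epi32(v);
//   /* 32-bit lanes of r (low..high): 1, -1, 2, 4 -- the .5 ties go to the */
//   /* nearest even integer, matching SSE rather than C's roundf()         */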
  4927. // Convert packed single-precision (32-bit) floating-point elements in a to
  4928. // packed 16-bit integers, and store the results in dst. Note: this intrinsic
  4929. // will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and
  4930. // 0x7FFFFFFF.
  4931. //
  4932. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pi16
  4933. FORCE_INLINE __m64 _mm_cvtps_pi16(__m128 a)
  4934. {
  4935. return vreinterpret_m64_s16(
  4936. vmovn_s32(vreinterpretq_s32_m128i(_mm_cvtps_epi32(a))));
  4937. }
  4938. // Copy the lower 32-bit integer in a to dst.
  4939. //
  4940. // dst[31:0] := a[31:0]
  4941. //
  4942. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si32
  4943. FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
  4944. {
  4945. return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
  4946. }
  4947. // Copy the lower 64-bit integer in a to dst.
  4948. //
  4949. // dst[63:0] := a[63:0]
  4950. //
  4951. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64
  4952. FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a)
  4953. {
  4954. return vgetq_lane_s64(vreinterpretq_s64_m128i(a), 0);
  4955. }
  4956. // Copy the lower 64-bit integer in a to dst.
  4957. //
  4958. // dst[63:0] := a[63:0]
  4959. //
  4960. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64x
  4961. #define _mm_cvtsi128_si64x(a) _mm_cvtsi128_si64(a)
// Moves 32-bit integer a to the least significant 32 bits of an __m128i
// object, zero extending the upper bits.
  4964. //
  4965. // r0 := a
  4966. // r1 := 0x0
  4967. // r2 := 0x0
  4968. // r3 := 0x0
  4969. //
  4970. // https://msdn.microsoft.com/en-us/library/ct3539ha%28v=vs.90%29.aspx
  4971. FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
  4972. {
  4973. return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0));
  4974. }
// Moves 64-bit integer a to the least significant 64 bits of an __m128i
// object, zero extending the upper bits.
  4977. //
  4978. // r0 := a
  4979. // r1 := 0x0
  4980. FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a)
  4981. {
  4982. return vreinterpretq_m128i_s64(vsetq_lane_s64(a, vdupq_n_s64(0), 0));
  4983. }
  4984. // Cast vector of type __m128 to type __m128d. This intrinsic is only used for
  4985. // compilation and does not generate any instructions, thus it has zero latency.
  4986. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castps_pd
  4987. FORCE_INLINE __m128d _mm_castps_pd(__m128 a)
  4988. {
  4989. return vreinterpretq_m128d_s32(vreinterpretq_s32_m128(a));
  4990. }
  4991. // Applies a type cast to reinterpret four 32-bit floating point values passed
  4992. // in as a 128-bit parameter as packed 32-bit integers.
  4993. // https://msdn.microsoft.com/en-us/library/bb514099.aspx
  4994. FORCE_INLINE __m128i _mm_castps_si128(__m128 a)
  4995. {
  4996. return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a));
  4997. }
  4998. // Cast vector of type __m128i to type __m128d. This intrinsic is only used for
  4999. // compilation and does not generate any instructions, thus it has zero latency.
  5000. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castsi128_pd
  5001. FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a)
  5002. {
  5003. #if defined(__aarch64__)
  5004. return vreinterpretq_m128d_f64(vreinterpretq_f64_m128i(a));
  5005. #else
  5006. return vreinterpretq_m128d_f32(vreinterpretq_f32_m128i(a));
  5007. #endif
  5008. }
  5009. // Applies a type cast to reinterpret four 32-bit integers passed in as a
  5010. // 128-bit parameter as packed 32-bit floating point values.
  5011. // https://msdn.microsoft.com/en-us/library/bb514029.aspx
  5012. FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
  5013. {
  5014. return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a));
  5015. }
  5016. // Loads 128-bit value. :
  5017. // https://msdn.microsoft.com/en-us/library/atzzad1h(v=vs.80).aspx
  5018. FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
  5019. {
  5020. return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
  5021. }
  5022. // Load a double-precision (64-bit) floating-point element from memory into both
  5023. // elements of dst.
  5024. //
  5025. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  5026. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  5027. //
  5028. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load1_pd
  5029. FORCE_INLINE __m128d _mm_load1_pd(const double *p)
  5030. {
  5031. #if defined(__aarch64__)
  5032. return vreinterpretq_m128d_f64(vld1q_dup_f64(p));
  5033. #else
  5034. return vreinterpretq_m128d_s64(vdupq_n_s64(*(const int64_t *) p));
  5035. #endif
  5036. }
  5037. // Load a double-precision (64-bit) floating-point element from memory into both
  5038. // elements of dst.
  5039. //
  5040. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  5041. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  5042. //
  5043. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd1
  5044. #define _mm_load_pd1 _mm_load1_pd
  5045. // Load a double-precision (64-bit) floating-point element from memory into the
  5046. // upper element of dst, and copy the lower element from a to dst. mem_addr does
  5047. // not need to be aligned on any particular boundary.
  5048. //
  5049. // dst[63:0] := a[63:0]
  5050. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  5051. //
  5052. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadh_pd
  5053. FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double *p)
  5054. {
  5055. #if defined(__aarch64__)
  5056. return vreinterpretq_m128d_f64(
  5057. vcombine_f64(vget_low_f64(vreinterpretq_f64_m128d(a)), vld1_f64(p)));
  5058. #else
  5059. return vreinterpretq_m128d_f32(vcombine_f32(
  5060. vget_low_f32(vreinterpretq_f32_m128d(a)), vld1_f32((const float *) p)));
  5061. #endif
  5062. }
  5071. // Load a double-precision (64-bit) floating-point element from memory into both
  5072. // elements of dst.
  5073. //
  5074. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  5075. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  5076. //
  5077. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loaddup_pd
  5078. #define _mm_loaddup_pd _mm_load1_pd
  5079. // Loads 128-bit value. :
  5080. // https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx
  5081. FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p)
  5082. {
  5083. return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
  5084. }
  5085. // Load unaligned 32-bit integer from memory into the first element of dst.
  5086. //
  5087. // dst[31:0] := MEM[mem_addr+31:mem_addr]
  5088. // dst[MAX:32] := 0
  5089. //
  5090. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si32
  5091. FORCE_INLINE __m128i _mm_loadu_si32(const void *p)
  5092. {
  5093. return vreinterpretq_m128i_s32(
  5094. vsetq_lane_s32(*(const int32_t *) p, vdupq_n_s32(0), 0));
  5095. }
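// Illustrative sketch: loads a single, possibly unaligned 32-bit value and
// zeroes the remaining lanes, e.g.
//
//   int32_t buf[3] = {7, 8, 9};
//   __m128i v = _mm_loadu_si32(&buf[1]);
//   /* 32-bit lanes of v (low..high): 8, 0, 0, 0 */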
  5096. // Convert packed double-precision (64-bit) floating-point elements in a to
  5097. // packed single-precision (32-bit) floating-point elements, and store the
  5098. // results in dst.
  5099. //
  5100. // FOR j := 0 to 1
  5101. // i := 32*j
  5102. // k := 64*j
// dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
  5104. // ENDFOR
  5105. // dst[127:64] := 0
  5106. //
  5107. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_ps
  5108. FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a)
  5109. {
  5110. #if defined(__aarch64__)
  5111. float32x2_t tmp = vcvt_f32_f64(vreinterpretq_f64_m128d(a));
  5112. return vreinterpretq_m128_f32(vcombine_f32(tmp, vdup_n_f32(0)));
  5113. #else
  5114. float a0 = (float) ((double *) &a)[0];
  5115. float a1 = (float) ((double *) &a)[1];
  5116. return _mm_set_ps(0, 0, a1, a0);
  5117. #endif
  5118. }
  5119. // Copy the lower double-precision (64-bit) floating-point element of a to dst.
  5120. //
  5121. // dst[63:0] := a[63:0]
  5122. //
  5123. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_f64
  5124. FORCE_INLINE double _mm_cvtsd_f64(__m128d a)
  5125. {
  5126. #if defined(__aarch64__)
  5127. return (double) vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0);
  5128. #else
  5129. return ((double *) &a)[0];
  5130. #endif
  5131. }
  5132. // Convert packed single-precision (32-bit) floating-point elements in a to
  5133. // packed double-precision (64-bit) floating-point elements, and store the
  5134. // results in dst.
  5135. //
  5136. // FOR j := 0 to 1
  5137. // i := 64*j
  5138. // k := 32*j
  5139. // dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
  5140. // ENDFOR
  5141. //
  5142. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pd
  5143. FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a)
  5144. {
  5145. #if defined(__aarch64__)
  5146. return vreinterpretq_m128d_f64(
  5147. vcvt_f64_f32(vget_low_f32(vreinterpretq_f32_m128(a))));
  5148. #else
  5149. double a0 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  5150. double a1 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
  5151. return _mm_set_pd(a1, a0);
  5152. #endif
  5153. }
  5154. // Cast vector of type __m128d to type __m128i. This intrinsic is only used for
  5155. // compilation and does not generate any instructions, thus it has zero latency.
  5156. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_si128
  5157. FORCE_INLINE __m128i _mm_castpd_si128(__m128d a)
  5158. {
  5159. return vreinterpretq_m128i_s64(vreinterpretq_s64_m128d(a));
  5160. }
  5161. // Cast vector of type __m128d to type __m128. This intrinsic is only used for
  5162. // compilation and does not generate any instructions, thus it has zero latency.
  5163. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_ps
  5164. FORCE_INLINE __m128 _mm_castpd_ps(__m128d a)
  5165. {
  5166. return vreinterpretq_m128_s64(vreinterpretq_s64_m128d(a));
  5167. }
  5168. // Blend packed single-precision (32-bit) floating-point elements from a and b
  5169. // using mask, and store the results in dst.
  5170. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_ps
  5171. FORCE_INLINE __m128 _mm_blendv_ps(__m128 _a, __m128 _b, __m128 _mask)
  5172. {
  5173. // Use a signed shift right to create a mask with the sign bit
  5174. uint32x4_t mask =
  5175. vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_m128(_mask), 31));
  5176. float32x4_t a = vreinterpretq_f32_m128(_a);
  5177. float32x4_t b = vreinterpretq_f32_m128(_b);
  5178. return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
  5179. }
  5180. // Blend packed single-precision (32-bit) floating-point elements from a and b
  5181. // using mask, and store the results in dst.
  5182. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blend_ps
  5183. FORCE_INLINE __m128 _mm_blend_ps(__m128 _a, __m128 _b, const char imm8)
  5184. {
  5185. const uint32_t ALIGN_STRUCT(16)
  5186. data[4] = {((imm8) & (1 << 0)) ? UINT32_MAX : 0,
  5187. ((imm8) & (1 << 1)) ? UINT32_MAX : 0,
  5188. ((imm8) & (1 << 2)) ? UINT32_MAX : 0,
  5189. ((imm8) & (1 << 3)) ? UINT32_MAX : 0};
  5190. uint32x4_t mask = vld1q_u32(data);
  5191. float32x4_t a = vreinterpretq_f32_m128(_a);
  5192. float32x4_t b = vreinterpretq_f32_m128(_b);
  5193. return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
  5194. }
  5195. // Blend packed double-precision (64-bit) floating-point elements from a and b
  5196. // using mask, and store the results in dst.
  5197. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_pd
  5198. FORCE_INLINE __m128d _mm_blendv_pd(__m128d _a, __m128d _b, __m128d _mask)
  5199. {
  5200. uint64x2_t mask =
  5201. vreinterpretq_u64_s64(vshrq_n_s64(vreinterpretq_s64_m128d(_mask), 63));
  5202. #if defined(__aarch64__)
  5203. float64x2_t a = vreinterpretq_f64_m128d(_a);
  5204. float64x2_t b = vreinterpretq_f64_m128d(_b);
  5205. return vreinterpretq_m128d_f64(vbslq_f64(mask, b, a));
  5206. #else
  5207. uint64x2_t a = vreinterpretq_u64_m128d(_a);
  5208. uint64x2_t b = vreinterpretq_u64_m128d(_b);
  5209. return vreinterpretq_m128d_u64(vbslq_u64(mask, b, a));
  5210. #endif
  5211. }
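// Illustrative sketch: only the sign bit of each mask lane selects b
// (assuming _mm_set_pd defined elsewhere in this header):
//
//   __m128d a = _mm_set_pd(1.0, 2.0);    /* lanes (low, high): 2.0, 1.0 */
//   __m128d b = _mm_set_pd(3.0, 4.0);    /* lanes (low, high): 4.0, 3.0 */
//   __m128d m = _mm_set_pd(-0.0, 0.0);   /* sign bit set in the high lane */
//   __m128d r = _mm_blendv_pd(a, b, m);  /* lanes (low, high): 2.0, 3.0 */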
  5212. typedef struct {
  5213. uint16_t res0;
  5214. uint8_t res1 : 6;
  5215. uint8_t bit22 : 1;
  5216. uint8_t bit23 : 1;
  5217. uint8_t res2;
  5218. #if defined(__aarch64__)
  5219. uint32_t res3;
  5220. #endif
  5221. } fpcr_bitfield;
  5222. // Macro: Set the rounding mode bits of the MXCSR control and status register to
  5223. // the value in unsigned 32-bit integer a. The rounding mode may contain any of
  5224. // the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP,
  5225. // _MM_ROUND_TOWARD_ZERO
  5226. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_SET_ROUNDING_MODE
  5227. FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding)
  5228. {
  5229. union {
  5230. fpcr_bitfield field;
  5231. #if defined(__aarch64__)
  5232. uint64_t value;
  5233. #else
  5234. uint32_t value;
  5235. #endif
  5236. } r;
  5237. #if defined(__aarch64__)
  5238. asm volatile("mrs %0, FPCR" : "=r"(r.value)); /* read */
  5239. #else
  5240. asm volatile("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
  5241. #endif
  5242. switch (rounding) {
  5243. case _MM_ROUND_TOWARD_ZERO:
  5244. r.field.bit22 = 1;
  5245. r.field.bit23 = 1;
  5246. break;
  5247. case _MM_ROUND_DOWN:
  5248. r.field.bit22 = 0;
  5249. r.field.bit23 = 1;
  5250. break;
  5251. case _MM_ROUND_UP:
  5252. r.field.bit22 = 1;
  5253. r.field.bit23 = 0;
  5254. break;
  5255. default: //_MM_ROUND_NEAREST
  5256. r.field.bit22 = 0;
  5257. r.field.bit23 = 0;
  5258. }
  5259. #if defined(__aarch64__)
  5260. asm volatile("msr FPCR, %0" ::"r"(r)); /* write */
  5261. #else
  5262. asm volatile("vmsr FPSCR, %0" ::"r"(r)); /* write */
  5263. #endif
  5264. }
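// Usage sketch (illustrative): the macro rewrites the FPCR/FPSCR rounding
// bits, which on AArch64 is what _MM_FROUND_CUR_DIRECTION rounding observes:
//
//   _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
//   __m128 r = _mm_round_ps(_mm_set1_ps(1.25f), _MM_FROUND_CUR_DIRECTION);
//   /* each lane of r is 2.0f while round-up is in effect */
//   _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);  /* restore the default */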
  5265. FORCE_INLINE void _mm_setcsr(unsigned int a)
  5266. {
  5267. _MM_SET_ROUNDING_MODE(a);
  5268. }
  5269. // Round the packed single-precision (32-bit) floating-point elements in a using
  5270. // the rounding parameter, and store the results as packed single-precision
  5271. // floating-point elements in dst.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ps
  5273. FORCE_INLINE __m128 _mm_round_ps(__m128 a, int rounding)
  5274. {
  5275. #if defined(__aarch64__)
  5276. switch (rounding) {
  5277. case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
  5278. return vreinterpretq_m128_f32(vrndnq_f32(vreinterpretq_f32_m128(a)));
  5279. case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
  5280. return vreinterpretq_m128_f32(vrndmq_f32(vreinterpretq_f32_m128(a)));
  5281. case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
  5282. return vreinterpretq_m128_f32(vrndpq_f32(vreinterpretq_f32_m128(a)));
  5283. case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
  5284. return vreinterpretq_m128_f32(vrndq_f32(vreinterpretq_f32_m128(a)));
  5285. default: //_MM_FROUND_CUR_DIRECTION
  5286. return vreinterpretq_m128_f32(vrndiq_f32(vreinterpretq_f32_m128(a)));
  5287. }
  5288. #else
  5289. float *v_float = (float *) &a;
  5290. __m128 zero, neg_inf, pos_inf;
  5291. switch (rounding) {
  5292. case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
  5293. return _mm_cvtepi32_ps(_mm_cvtps_epi32(a));
  5294. case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
  5295. return (__m128){floorf(v_float[0]), floorf(v_float[1]),
  5296. floorf(v_float[2]), floorf(v_float[3])};
  5297. case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
  5298. return (__m128){ceilf(v_float[0]), ceilf(v_float[1]), ceilf(v_float[2]),
  5299. ceilf(v_float[3])};
  5300. case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
  5301. zero = _mm_set_ps(0.0f, 0.0f, 0.0f, 0.0f);
  5302. neg_inf = _mm_set_ps(floorf(v_float[0]), floorf(v_float[1]),
  5303. floorf(v_float[2]), floorf(v_float[3]));
  5304. pos_inf = _mm_set_ps(ceilf(v_float[0]), ceilf(v_float[1]),
  5305. ceilf(v_float[2]), ceilf(v_float[3]));
  5306. return _mm_blendv_ps(pos_inf, neg_inf, _mm_cmple_ps(a, zero));
  5307. default: //_MM_FROUND_CUR_DIRECTION
  5308. return (__m128){roundf(v_float[0]), roundf(v_float[1]),
  5309. roundf(v_float[2]), roundf(v_float[3])};
  5310. }
  5311. #endif
  5312. }
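// Worked example (illustrative):
//
//   __m128 v = _mm_set1_ps(-1.5f);
//   _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); /* -2.0f */
//   _mm_round_ps(v, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);     /* -2.0f */
//   _mm_round_ps(v, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);     /* -1.0f */
//   _mm_round_ps(v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);        /* -1.0f */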
  5313. // Convert packed single-precision (32-bit) floating-point elements in a to
  5314. // packed 32-bit integers, and store the results in dst.
  5315. //
  5316. // FOR j := 0 to 1
  5317. // i := 32*j
  5318. // dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
  5319. // ENDFOR
  5320. //
  5321. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_ps2pi
  5322. FORCE_INLINE __m64 _mm_cvt_ps2pi(__m128 a)
  5323. {
  5324. #if defined(__aarch64__)
  5325. return vreinterpret_m64_s32(
  5326. vget_low_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a))));
  5327. #else
  5328. return vreinterpret_m64_s32(
  5329. vcvt_s32_f32(vget_low_f32(vreinterpretq_f32_m128(
  5330. _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)))));
  5331. #endif
  5332. }
  5333. // Convert packed single-precision (32-bit) floating-point elements in a to
  5334. // packed 32-bit integers, and store the results in dst.
  5335. //
  5336. // FOR j := 0 to 1
  5337. // i := 32*j
  5338. // dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
  5339. // ENDFOR
  5340. //
  5341. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pi32
  5342. #define _mm_cvtps_pi32(a) _mm_cvt_ps2pi(a)
  5343. // Round the packed single-precision (32-bit) floating-point elements in a up to
  5344. // an integer value, and store the results as packed single-precision
  5345. // floating-point elements in dst.
  5346. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_ps
  5347. FORCE_INLINE __m128 _mm_ceil_ps(__m128 a)
  5348. {
  5349. return _mm_round_ps(a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
  5350. }
  5351. // Round the lower single-precision (32-bit) floating-point element in b up to
  5352. // an integer value, store the result as a single-precision floating-point
  5353. // element in the lower element of dst, and copy the upper 3 packed elements
  5354. // from a to the upper elements of dst.
  5355. //
  5356. // dst[31:0] := CEIL(b[31:0])
  5357. // dst[127:32] := a[127:32]
  5358. //
  5359. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_ss
  5360. FORCE_INLINE __m128 _mm_ceil_ss(__m128 a, __m128 b)
  5361. {
  5362. return _mm_move_ss(
  5363. a, _mm_round_ps(b, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
  5364. }
  5365. // Round the packed single-precision (32-bit) floating-point elements in a down
  5366. // to an integer value, and store the results as packed single-precision
  5367. // floating-point elements in dst.
  5368. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ps
  5369. FORCE_INLINE __m128 _mm_floor_ps(__m128 a)
  5370. {
  5371. return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
  5372. }
  5373. // Round the lower single-precision (32-bit) floating-point element in b down to
  5374. // an integer value, store the result as a single-precision floating-point
  5375. // element in the lower element of dst, and copy the upper 3 packed elements
  5376. // from a to the upper elements of dst.
  5377. //
  5378. // dst[31:0] := FLOOR(b[31:0])
  5379. // dst[127:32] := a[127:32]
  5380. //
  5381. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ss
  5382. FORCE_INLINE __m128 _mm_floor_ss(__m128 a, __m128 b)
  5383. {
  5384. return _mm_move_ss(
  5385. a, _mm_round_ps(b, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
  5386. }
  5387. // Load 128-bits of integer data from unaligned memory into dst. This intrinsic
  5388. // may perform better than _mm_loadu_si128 when the data crosses a cache line
  5389. // boundary.
  5390. //
  5391. // dst[127:0] := MEM[mem_addr+127:mem_addr]
  5392. //
  5393. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lddqu_si128
  5394. #define _mm_lddqu_si128 _mm_loadu_si128
  5395. /* Miscellaneous Operations */
  5396. // Shifts the 8 signed 16-bit integers in a right by count bits while shifting
  5397. // in the sign bit.
  5398. //
  5399. // r0 := a0 >> count
  5400. // r1 := a1 >> count
  5401. // ...
  5402. // r7 := a7 >> count
  5403. //
  5404. // https://msdn.microsoft.com/en-us/library/3c9997dk(v%3dvs.90).aspx
  5405. FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count)
  5406. {
  5407. int64_t c = (int64_t) vget_low_s64((int64x2_t) count);
  5408. if (unlikely(c > 15))
  5409. return _mm_cmplt_epi16(a, _mm_setzero_si128());
  5410. return vreinterpretq_m128i_s16(vshlq_s16((int16x8_t) a, vdupq_n_s16(-c)));
  5411. }
  5412. // Shifts the 4 signed 32-bit integers in a right by count bits while shifting
  5413. // in the sign bit.
  5414. //
  5415. // r0 := a0 >> count
  5416. // r1 := a1 >> count
  5417. // r2 := a2 >> count
  5418. // r3 := a3 >> count
  5419. //
  5420. // https://msdn.microsoft.com/en-us/library/ce40009e(v%3dvs.100).aspx
  5421. FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count)
  5422. {
  5423. int64_t c = (int64_t) vget_low_s64((int64x2_t) count);
  5424. if (unlikely(c > 31))
  5425. return _mm_cmplt_epi32(a, _mm_setzero_si128());
  5426. return vreinterpretq_m128i_s32(vshlq_s32((int32x4_t) a, vdupq_n_s32(-c)));
  5427. }
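// Illustrative sketch: shift counts wider than the element fill each lane
// with copies of its sign bit (assuming _mm_set_epi32 defined elsewhere in
// this header):
//
//   __m128i v = _mm_set_epi32(-8, 8, -1, 1);
//   __m128i r = _mm_sra_epi32(v, _mm_cvtsi32_si128(40));
//   /* 32-bit lanes of r (low..high): 0, -1, 0, -1 */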
  5428. // Packs the 16 signed 16-bit integers from a and b into 8-bit integers and
  5429. // saturates.
  5430. // https://msdn.microsoft.com/en-us/library/k4y4f7w5%28v=vs.90%29.aspx
  5431. FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
  5432. {
  5433. return vreinterpretq_m128i_s8(
  5434. vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)),
  5435. vqmovn_s16(vreinterpretq_s16_m128i(b))));
  5436. }
// Packs the 16 signed 16-bit integers from a and b into 8-bit unsigned
// integers and saturates.
  5439. //
  5440. // r0 := UnsignedSaturate(a0)
  5441. // r1 := UnsignedSaturate(a1)
  5442. // ...
  5443. // r7 := UnsignedSaturate(a7)
  5444. // r8 := UnsignedSaturate(b0)
  5445. // r9 := UnsignedSaturate(b1)
  5446. // ...
  5447. // r15 := UnsignedSaturate(b7)
  5448. //
  5449. // https://msdn.microsoft.com/en-us/library/07ad1wx4(v=vs.100).aspx
  5450. FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
  5451. {
  5452. return vreinterpretq_m128i_u8(
  5453. vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)),
  5454. vqmovun_s16(vreinterpretq_s16_m128i(b))));
  5455. }
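// Illustrative sketch: values outside the unsigned 8-bit range saturate
// (assuming _mm_set1_epi16 defined elsewhere in this header):
//
//   __m128i r = _mm_packus_epi16(_mm_set1_epi16(300), _mm_set1_epi16(-5));
//   /* low 8 bytes of r are 255 (300 clamped), high 8 bytes are 0 (-5 clamped) */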
  5456. // Packs the 8 signed 32-bit integers from a and b into signed 16-bit integers
  5457. // and saturates.
  5458. //
  5459. // r0 := SignedSaturate(a0)
  5460. // r1 := SignedSaturate(a1)
  5461. // r2 := SignedSaturate(a2)
  5462. // r3 := SignedSaturate(a3)
  5463. // r4 := SignedSaturate(b0)
  5464. // r5 := SignedSaturate(b1)
  5465. // r6 := SignedSaturate(b2)
  5466. // r7 := SignedSaturate(b3)
  5467. //
  5468. // https://msdn.microsoft.com/en-us/library/393t56f9%28v=vs.90%29.aspx
  5469. FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
  5470. {
  5471. return vreinterpretq_m128i_s16(
  5472. vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)),
  5473. vqmovn_s32(vreinterpretq_s32_m128i(b))));
  5474. }
  5475. // Packs the 8 unsigned 32-bit integers from a and b into unsigned 16-bit
  5476. // integers and saturates.
  5477. //
  5478. // r0 := UnsignedSaturate(a0)
  5479. // r1 := UnsignedSaturate(a1)
  5480. // r2 := UnsignedSaturate(a2)
  5481. // r3 := UnsignedSaturate(a3)
  5482. // r4 := UnsignedSaturate(b0)
  5483. // r5 := UnsignedSaturate(b1)
  5484. // r6 := UnsignedSaturate(b2)
  5485. // r7 := UnsignedSaturate(b3)
  5486. FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b)
  5487. {
  5488. return vreinterpretq_m128i_u16(
  5489. vcombine_u16(vqmovun_s32(vreinterpretq_s32_m128i(a)),
  5490. vqmovun_s32(vreinterpretq_s32_m128i(b))));
  5491. }
  5492. // Interleaves the lower 8 signed or unsigned 8-bit integers in a with the lower
  5493. // 8 signed or unsigned 8-bit integers in b.
  5494. //
  5495. // r0 := a0
  5496. // r1 := b0
  5497. // r2 := a1
  5498. // r3 := b1
  5499. // ...
  5500. // r14 := a7
  5501. // r15 := b7
  5502. //
  5503. // https://msdn.microsoft.com/en-us/library/xf7k860c%28v=vs.90%29.aspx
  5504. FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
  5505. {
  5506. #if defined(__aarch64__)
  5507. return vreinterpretq_m128i_s8(
  5508. vzip1q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  5509. #else
  5510. int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a)));
  5511. int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b)));
  5512. int8x8x2_t result = vzip_s8(a1, b1);
  5513. return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
  5514. #endif
  5515. }
  5516. // Interleaves the lower 4 signed or unsigned 16-bit integers in a with the
  5517. // lower 4 signed or unsigned 16-bit integers in b.
  5518. //
  5519. // r0 := a0
  5520. // r1 := b0
  5521. // r2 := a1
  5522. // r3 := b1
  5523. // r4 := a2
  5524. // r5 := b2
  5525. // r6 := a3
  5526. // r7 := b3
  5527. //
  5528. // https://msdn.microsoft.com/en-us/library/btxb17bw%28v=vs.90%29.aspx
  5529. FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
  5530. {
  5531. #if defined(__aarch64__)
  5532. return vreinterpretq_m128i_s16(
  5533. vzip1q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  5534. #else
  5535. int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a));
  5536. int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b));
  5537. int16x4x2_t result = vzip_s16(a1, b1);
  5538. return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
  5539. #endif
  5540. }
// Interleaves the lower 2 signed or unsigned 32-bit integers in a with the
// lower 2 signed or unsigned 32-bit integers in b.
  5543. //
  5544. // r0 := a0
  5545. // r1 := b0
  5546. // r2 := a1
  5547. // r3 := b1
  5548. //
  5549. // https://msdn.microsoft.com/en-us/library/x8atst9d(v=vs.100).aspx
  5550. FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
  5551. {
  5552. #if defined(__aarch64__)
  5553. return vreinterpretq_m128i_s32(
  5554. vzip1q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  5555. #else
  5556. int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a));
  5557. int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b));
  5558. int32x2x2_t result = vzip_s32(a1, b1);
  5559. return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
  5560. #endif
  5561. }
  5562. FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b)
  5563. {
  5564. int64x1_t a_l = vget_low_s64(vreinterpretq_s64_m128i(a));
  5565. int64x1_t b_l = vget_low_s64(vreinterpretq_s64_m128i(b));
  5566. return vreinterpretq_m128i_s64(vcombine_s64(a_l, b_l));
  5567. }
  5568. // Selects and interleaves the lower two single-precision, floating-point values
  5569. // from a and b.
  5570. //
  5571. // r0 := a0
  5572. // r1 := b0
  5573. // r2 := a1
  5574. // r3 := b1
  5575. //
  5576. // https://msdn.microsoft.com/en-us/library/25st103b%28v=vs.90%29.aspx
  5577. FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b)
  5578. {
  5579. #if defined(__aarch64__)
  5580. return vreinterpretq_m128_f32(
  5581. vzip1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  5582. #else
  5583. float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a));
  5584. float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b));
  5585. float32x2x2_t result = vzip_f32(a1, b1);
  5586. return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
  5587. #endif
  5588. }
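// Illustrative sketch (assuming _mm_set_ps defined elsewhere in this header):
//
//   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); /* lanes (low..high): 1..4 */
//   __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f); /* lanes (low..high): 5..8 */
//   __m128 r = _mm_unpacklo_ps(a, b);     /* lanes (low..high): 1, 5, 2, 6 */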
  5589. // Unpack and interleave double-precision (64-bit) floating-point elements from
  5590. // the low half of a and b, and store the results in dst.
  5591. //
  5592. // DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
  5593. // dst[63:0] := src1[63:0]
  5594. // dst[127:64] := src2[63:0]
  5595. // RETURN dst[127:0]
  5596. // }
  5597. // dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
  5598. //
  5599. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_pd
  5600. FORCE_INLINE __m128d _mm_unpacklo_pd(__m128d a, __m128d b)
  5601. {
  5602. #if defined(__aarch64__)
  5603. return vreinterpretq_m128d_f64(
  5604. vzip1q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  5605. #else
  5606. return vreinterpretq_m128d_s64(
  5607. vcombine_s64(vget_low_s64(vreinterpretq_s64_m128d(a)),
  5608. vget_low_s64(vreinterpretq_s64_m128d(b))));
  5609. #endif
  5610. }
  5611. // Unpack and interleave double-precision (64-bit) floating-point elements from
  5612. // the high half of a and b, and store the results in dst.
  5613. //
  5614. // DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
  5615. // dst[63:0] := src1[127:64]
  5616. // dst[127:64] := src2[127:64]
  5617. // RETURN dst[127:0]
  5618. // }
  5619. // dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
  5620. //
  5621. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_pd
  5622. FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b)
  5623. {
  5624. #if defined(__aarch64__)
  5625. return vreinterpretq_m128d_f64(
  5626. vzip2q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  5627. #else
  5628. return vreinterpretq_m128d_s64(
  5629. vcombine_s64(vget_high_s64(vreinterpretq_s64_m128d(a)),
  5630. vget_high_s64(vreinterpretq_s64_m128d(b))));
  5631. #endif
  5632. }
  5633. // Selects and interleaves the upper two single-precision, floating-point values
  5634. // from a and b.
  5635. //
  5636. // r0 := a2
  5637. // r1 := b2
  5638. // r2 := a3
  5639. // r3 := b3
  5640. //
  5641. // https://msdn.microsoft.com/en-us/library/skccxx7d%28v=vs.90%29.aspx
  5642. FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b)
  5643. {
  5644. #if defined(__aarch64__)
  5645. return vreinterpretq_m128_f32(
  5646. vzip2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  5647. #else
  5648. float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a));
  5649. float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b));
  5650. float32x2x2_t result = vzip_f32(a1, b1);
  5651. return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
  5652. #endif
  5653. }
  5654. // Interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper
  5655. // 8 signed or unsigned 8-bit integers in b.
  5656. //
  5657. // r0 := a8
  5658. // r1 := b8
  5659. // r2 := a9
  5660. // r3 := b9
  5661. // ...
  5662. // r14 := a15
  5663. // r15 := b15
  5664. //
  5665. // https://msdn.microsoft.com/en-us/library/t5h7783k(v=vs.100).aspx
  5666. FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
  5667. {
  5668. #if defined(__aarch64__)
  5669. return vreinterpretq_m128i_s8(
  5670. vzip2q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  5671. #else
  5672. int8x8_t a1 =
  5673. vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a)));
  5674. int8x8_t b1 =
  5675. vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b)));
  5676. int8x8x2_t result = vzip_s8(a1, b1);
  5677. return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
  5678. #endif
  5679. }
  5680. // Interleaves the upper 4 signed or unsigned 16-bit integers in a with the
  5681. // upper 4 signed or unsigned 16-bit integers in b.
  5682. //
  5683. // r0 := a4
  5684. // r1 := b4
  5685. // r2 := a5
  5686. // r3 := b5
  5687. // r4 := a6
  5688. // r5 := b6
  5689. // r6 := a7
  5690. // r7 := b7
  5691. //
  5692. // https://msdn.microsoft.com/en-us/library/03196cz7(v=vs.100).aspx
  5693. FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
  5694. {
  5695. #if defined(__aarch64__)
  5696. return vreinterpretq_m128i_s16(
  5697. vzip2q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  5698. #else
  5699. int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a));
  5700. int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b));
  5701. int16x4x2_t result = vzip_s16(a1, b1);
  5702. return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
  5703. #endif
  5704. }
  5705. // Interleaves the upper 2 signed or unsigned 32-bit integers in a with the
  5706. // upper 2 signed or unsigned 32-bit integers in b.
  5707. // https://msdn.microsoft.com/en-us/library/65sa7cbs(v=vs.100).aspx
  5708. FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
  5709. {
  5710. #if defined(__aarch64__)
  5711. return vreinterpretq_m128i_s32(
  5712. vzip2q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  5713. #else
  5714. int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a));
  5715. int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b));
  5716. int32x2x2_t result = vzip_s32(a1, b1);
  5717. return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
  5718. #endif
  5719. }
  5720. // Interleaves the upper signed or unsigned 64-bit integer in a with the
  5721. // upper signed or unsigned 64-bit integer in b.
  5722. //
  5723. // r0 := a1
  5724. // r1 := b1
  5725. FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b)
  5726. {
  5727. int64x1_t a_h = vget_high_s64(vreinterpretq_s64_m128i(a));
  5728. int64x1_t b_h = vget_high_s64(vreinterpretq_s64_m128i(b));
  5729. return vreinterpretq_m128i_s64(vcombine_s64(a_h, b_h));
  5730. }
  5731. // Horizontally compute the minimum amongst the packed unsigned 16-bit integers
  5732. // in a, store the minimum and index in dst, and zero the remaining bits in dst.
  5733. //
  5734. // index[2:0] := 0
  5735. // min[15:0] := a[15:0]
  5736. // FOR j := 0 to 7
  5737. // i := j*16
  5738. // IF a[i+15:i] < min[15:0]
  5739. // index[2:0] := j
  5740. // min[15:0] := a[i+15:i]
  5741. // FI
  5742. // ENDFOR
  5743. // dst[15:0] := min[15:0]
  5744. // dst[18:16] := index[2:0]
  5745. // dst[127:19] := 0
  5746. //
  5747. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_minpos_epu16
  5748. FORCE_INLINE __m128i _mm_minpos_epu16(__m128i a)
  5749. {
  5750. __m128i dst;
  5751. uint16_t min, idx = 0;
  5752. // Find the minimum value
  5753. #if defined(__aarch64__)
  5754. min = vminvq_u16(vreinterpretq_u16_m128i(a));
  5755. #else
  5756. __m64 tmp;
  5757. tmp = vreinterpret_m64_u16(
  5758. vmin_u16(vget_low_u16(vreinterpretq_u16_m128i(a)),
  5759. vget_high_u16(vreinterpretq_u16_m128i(a))));
  5760. tmp = vreinterpret_m64_u16(
  5761. vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
  5762. tmp = vreinterpret_m64_u16(
  5763. vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
  5764. min = vget_lane_u16(vreinterpret_u16_m64(tmp), 0);
  5765. #endif
  5766. // Get the index of the minimum value
  5767. int i;
  5768. for (i = 0; i < 8; i++) {
  5769. if (min == vgetq_lane_u16(vreinterpretq_u16_m128i(a), 0)) {
  5770. idx = (uint16_t) i;
  5771. break;
  5772. }
  5773. a = _mm_srli_si128(a, 2);
  5774. }
  5775. // Generate result
  5776. dst = _mm_setzero_si128();
  5777. dst = vreinterpretq_m128i_u16(
  5778. vsetq_lane_u16(min, vreinterpretq_u16_m128i(dst), 0));
  5779. dst = vreinterpretq_m128i_u16(
  5780. vsetq_lane_u16(idx, vreinterpretq_u16_m128i(dst), 1));
  5781. return dst;
  5782. }
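// Worked example (illustrative, assuming _mm_set_epi16 defined elsewhere in
// this header):
//
//   __m128i v = _mm_set_epi16(7, 6, 5, 4, 9, 2, 2, 8);
//   __m128i r = _mm_minpos_epu16(v);
//   /* 16-bit lane 0 of r is the minimum (2), lane 1 is its lowest index (1), */
//   /* and all remaining lanes are zero                                       */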

// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
// otherwise set CF to 0. Return the CF value.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testc_si128
FORCE_INLINE int _mm_testc_si128(__m128i a, __m128i b)
{
    int64x2_t s64 =
        vandq_s64(vreinterpretq_s64_s32(vmvnq_s32(vreinterpretq_s32_m128i(a))),
                  vreinterpretq_s64_m128i(b));
    return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
}

// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
// otherwise set CF to 0. Return the ZF value.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testz_si128
FORCE_INLINE int _mm_testz_si128(__m128i a, __m128i b)
{
    int64x2_t s64 =
        vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b));
    return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
}
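
// Usage sketch (illustrative only): _mm_testz_si128 answers "do a and b share
// no set bits?", while _mm_testc_si128 answers "does a cover every set bit of
// b?".
//
//   __m128i mask = _mm_set1_epi8(0x0f);
//   __m128i data = _mm_set1_epi8(0x10);
//   int no_overlap = _mm_testz_si128(mask, data);  // 1: (mask & data) == 0
//   int covers     = _mm_testc_si128(mask, data);  // 0: data has bits outside mask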

// Extracts the selected signed or unsigned 8-bit integer from a and zero
// extends.
// FORCE_INLINE int _mm_extract_epi8(__m128i a, __constrange(0,16) int imm)
#define _mm_extract_epi8(a, imm) vgetq_lane_u8(vreinterpretq_u8_m128i(a), (imm))

// Inserts the least significant 8 bits of b into the selected 8-bit integer
// of a.
// FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b,
//                                      __constrange(0,16) int imm)
#define _mm_insert_epi8(a, b, imm)                                 \
    __extension__({                                                \
        vreinterpretq_m128i_s8(                                    \
            vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm))); \
    })

// Extracts the selected signed or unsigned 16-bit integer from a and zero
// extends.
// https://msdn.microsoft.com/en-us/library/6dceta0c(v=vs.100).aspx
// FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm)
#define _mm_extract_epi16(a, imm) \
    vgetq_lane_u16(vreinterpretq_u16_m128i(a), (imm))

// Inserts the least significant 16 bits of b into the selected 16-bit integer
// of a.
// https://msdn.microsoft.com/en-us/library/kaze8hz1%28v=vs.100%29.aspx
// FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b,
//                                       __constrange(0,8) int imm)
#define _mm_insert_epi16(a, b, imm)                                  \
    __extension__({                                                  \
        vreinterpretq_m128i_s16(                                     \
            vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm))); \
    })

// Copy a to dst, and insert the 16-bit integer i into dst at the location
// specified by imm8.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_insert_pi16
#define _mm_insert_pi16(a, b, imm)                               \
    __extension__({                                              \
        vreinterpret_m64_s16(                                    \
            vset_lane_s16((b), vreinterpret_s16_m64(a), (imm))); \
    })

// Extracts the selected signed or unsigned 32-bit integer from a and zero
// extends.
// FORCE_INLINE int _mm_extract_epi32(__m128i a, __constrange(0,4) int imm)
#define _mm_extract_epi32(a, imm) \
    vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))

// Extracts the selected single-precision (32-bit) floating-point element from
// a, returning its bit pattern as a 32-bit integer.
// FORCE_INLINE int _mm_extract_ps(__m128 a, __constrange(0,4) int imm)
#define _mm_extract_ps(a, imm) vgetq_lane_s32(vreinterpretq_s32_m128(a), (imm))

// Inserts the least significant 32 bits of b into the selected 32-bit integer
// of a.
// FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b,
//                                       __constrange(0,4) int imm)
#define _mm_insert_epi32(a, b, imm)                                  \
    __extension__({                                                  \
        vreinterpretq_m128i_s32(                                     \
            vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm))); \
    })

// Extracts the selected signed or unsigned 64-bit integer from a and zero
// extends.
// FORCE_INLINE __int64 _mm_extract_epi64(__m128i a, __constrange(0,2) int imm)
#define _mm_extract_epi64(a, imm) \
    vgetq_lane_s64(vreinterpretq_s64_m128i(a), (imm))

// Inserts the least significant 64 bits of b into the selected 64-bit integer
// of a.
// FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b,
//                                       __constrange(0,2) int imm)
#define _mm_insert_epi64(a, b, imm)                                  \
    __extension__({                                                  \
        vreinterpretq_m128i_s64(                                     \
            vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm))); \
    })
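
// Usage sketch (illustrative only): because the lane index must be a
// compile-time constant, these extract/insert operations are macros rather
// than functions.
//
//   __m128i v = _mm_set1_epi32(0);
//   v = _mm_insert_epi32(v, 42, 3);   // write 42 into lane 3
//   int x = _mm_extract_epi32(v, 3);  // x == 42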

// Count the number of bits set to 1 in unsigned 32-bit integer a, and
// return that count in dst.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u32
FORCE_INLINE int _mm_popcnt_u32(unsigned int a)
{
#if defined(__aarch64__)
#if __has_builtin(__builtin_popcount)
    return __builtin_popcount(a);
#else
    return (int) vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t) a)));
#endif
#else
    uint8x8_t input_val, count8x8_val;
    uint16x4_t count16x4_val;
    uint32x2_t count32x2_val;
    // Place the 32-bit input in the low half of a 64-bit vector (avoiding an
    // out-of-bounds read of &a), count set bits per byte, then pairwise
    // accumulate the byte counts.
    input_val = vcreate_u8((uint64_t) a);
    count8x8_val = vcnt_u8(input_val);
    count16x4_val = vpaddl_u8(count8x8_val);
    count32x2_val = vpaddl_u16(count16x4_val);
    // Lane 0 holds the popcount of the original 32-bit value.
    return (int) vget_lane_u32(count32x2_val, 0);
#endif
}

// Count the number of bits set to 1 in unsigned 64-bit integer a, and
// return that count in dst.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u64
FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a)
{
#if defined(__aarch64__)
#if __has_builtin(__builtin_popcountll)
    return __builtin_popcountll(a);
#else
    return (int64_t) vaddlv_u8(vcnt_u8(vcreate_u8(a)));
#endif
#else
    uint64_t count = 0;
    uint8x8_t input_val, count8x8_val;
    uint16x4_t count16x4_val;
    uint32x2_t count32x2_val;
    uint64x1_t count64x1_val;
    input_val = vld1_u8((uint8_t *) &a);
    count8x8_val = vcnt_u8(input_val);
    count16x4_val = vpaddl_u8(count8x8_val);
    count32x2_val = vpaddl_u16(count16x4_val);
    count64x1_val = vpaddl_u32(count32x2_val);
    vst1_u64(&count, count64x1_val);
    return count;
#endif
}
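
// Usage sketch (illustrative only):
//
//   int bits32 = _mm_popcnt_u32(0xF0F0u);            // 8
//   int64_t bits64 = _mm_popcnt_u64(0xFFFFFFFFull);  // 32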

// Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision
// (32-bit) floating-point elements in row0, row1, row2, and row3, and store the
// transposed matrix in these vectors (row0 now contains column 0, etc.).
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=MM_TRANSPOSE4_PS
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)         \
    do {                                                  \
        float32x4x2_t ROW01 = vtrnq_f32(row0, row1);      \
        float32x4x2_t ROW23 = vtrnq_f32(row2, row3);      \
        row0 = vcombine_f32(vget_low_f32(ROW01.val[0]),   \
                            vget_low_f32(ROW23.val[0]));  \
        row1 = vcombine_f32(vget_low_f32(ROW01.val[1]),   \
                            vget_low_f32(ROW23.val[1]));  \
        row2 = vcombine_f32(vget_high_f32(ROW01.val[0]),  \
                            vget_high_f32(ROW23.val[0])); \
        row3 = vcombine_f32(vget_high_f32(ROW01.val[1]),  \
                            vget_high_f32(ROW23.val[1])); \
    } while (0)
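
// Usage sketch (illustrative only): transpose a 4x4 matrix held in four
// __m128 rows in place.
//
//   __m128 r0 = _mm_setr_ps(1, 2, 3, 4);
//   __m128 r1 = _mm_setr_ps(5, 6, 7, 8);
//   __m128 r2 = _mm_setr_ps(9, 10, 11, 12);
//   __m128 r3 = _mm_setr_ps(13, 14, 15, 16);
//   _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
//   // r0 is now (1, 5, 9, 13), r1 is (2, 6, 10, 14), and so on.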

/* Crypto Extensions */

#if defined(__ARM_FEATURE_CRYPTO)
// Wraps vmull_p64
FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
{
    poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
    poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
    return vreinterpretq_u64_p128(vmull_p64(a, b));
}

#else  // ARMv7 polyfill
// ARMv7, and AArch64 cores without the Crypto extension, lack vmull_p64 but do
// have vmull_p8.
//
// vmull_p8 performs eight 8-bit -> 16-bit polynomial multiplies, whereas we
// need a single 64-bit -> 128-bit polynomial multiply.
//
// Building the wide multiply out of vmull_p8 takes some work and is somewhat
// slow, but it is still faster than all known scalar methods.
//
// Algorithm adapted to C from
// https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted
// from "Fast Software Polynomial Multiplication on ARM Processors Using the
// NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab
// (https://hal.inria.fr/hal-01506572)
static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
{
    poly8x8_t a = vreinterpret_p8_u64(_a);
    poly8x8_t b = vreinterpret_p8_u64(_b);

    // Masks
    uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff),
                                    vcreate_u8(0x00000000ffffffff));
    uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff),
                                    vcreate_u8(0x0000000000000000));

    // Do the multiplies, rotating with vext to get all combinations
    uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b));  // D = A0 * B0
    uint8x16_t e =
        vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1)));  // E = A0 * B1
    uint8x16_t f =
        vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b));  // F = A1 * B0
    uint8x16_t g =
        vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2)));  // G = A0 * B2
    uint8x16_t h =
        vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b));  // H = A2 * B0
    uint8x16_t i =
        vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3)));  // I = A0 * B3
    uint8x16_t j =
        vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b));  // J = A3 * B0
    uint8x16_t k =
        vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4)));  // K = A0 * B4

    // Add cross products
    uint8x16_t l = veorq_u8(e, f);  // L = E + F
    uint8x16_t m = veorq_u8(g, h);  // M = G + H
    uint8x16_t n = veorq_u8(i, j);  // N = I + J

    // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL
    // instructions.
#if defined(__aarch64__)
    uint8x16_t lm_p0 = vreinterpretq_u8_u64(
        vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
    uint8x16_t lm_p1 = vreinterpretq_u8_u64(
        vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
    uint8x16_t nk_p0 = vreinterpretq_u8_u64(
        vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
    uint8x16_t nk_p1 = vreinterpretq_u8_u64(
        vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
#else
    uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m));
    uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m));
    uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k));
    uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k));
#endif
    // t0 = (L) (P0 + P1) << 8
    // t1 = (M) (P2 + P3) << 16
    uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1);
    uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32);
    uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h);

    // t2 = (N) (P4 + P5) << 24
    // t3 = (K) (P6 + P7) << 32
    uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1);
    uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00);
    uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h);

    // De-interleave
#if defined(__aarch64__)
    uint8x16_t t0 = vreinterpretq_u8_u64(
        vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
    uint8x16_t t1 = vreinterpretq_u8_u64(
        vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
    uint8x16_t t2 = vreinterpretq_u8_u64(
        vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
    uint8x16_t t3 = vreinterpretq_u8_u64(
        vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
#else
    uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h));
    uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h));
    uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h));
    uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h));
#endif
    // Shift the cross products
    uint8x16_t t0_shift = vextq_u8(t0, t0, 15);  // t0 << 8
    uint8x16_t t1_shift = vextq_u8(t1, t1, 14);  // t1 << 16
    uint8x16_t t2_shift = vextq_u8(t2, t2, 13);  // t2 << 24
    uint8x16_t t3_shift = vextq_u8(t3, t3, 12);  // t3 << 32

    // Accumulate the products
    uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift);
    uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift);
    uint8x16_t mix = veorq_u8(d, cross1);
    uint8x16_t r = veorq_u8(mix, cross2);
    return vreinterpretq_u64_u8(r);
}
#endif  // ARMv7 polyfill

// Perform a carry-less multiplication of two 64-bit integers, selected from a
// and b according to imm8, and store the results in dst.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_clmulepi64_si128
FORCE_INLINE __m128i _mm_clmulepi64_si128(__m128i _a, __m128i _b, const int imm)
{
    uint64x2_t a = vreinterpretq_u64_m128i(_a);
    uint64x2_t b = vreinterpretq_u64_m128i(_b);
    switch (imm & 0x11) {
    case 0x00:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_low_u64(a), vget_low_u64(b)));
    case 0x01:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_high_u64(a), vget_low_u64(b)));
    case 0x10:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_low_u64(a), vget_high_u64(b)));
    case 0x11:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_high_u64(a), vget_high_u64(b)));
    default:
        abort();
    }
}
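
// Worked example (illustrative only): carry-less multiplication is polynomial
// multiplication over GF(2), so partial products are combined with XOR rather
// than addition. For instance, 0b0110 clmul 0b0101:
//
//   0b0110 x 0b0100 = 0b11000
//   0b0110 x 0b0001 = 0b00110
//   XOR             = 0b11110  (0x1E)
//
//   __m128i a = _mm_set_epi64x(0, 0x6);
//   __m128i b = _mm_set_epi64x(0, 0x5);
//   __m128i r = _mm_clmulepi64_si128(a, b, 0x00);  // low 64 bits == 0x1E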

#if !defined(__ARM_FEATURE_CRYPTO)
/* clang-format off */
#define SSE2NEON_AES_DATA(w)                                       \
    {                                                              \
        w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), \
        w(0xc5), w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), \
        w(0xab), w(0x76), w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), \
        w(0x59), w(0x47), w(0xf0), w(0xad), w(0xd4), w(0xa2), w(0xaf), \
        w(0x9c), w(0xa4), w(0x72), w(0xc0), w(0xb7), w(0xfd), w(0x93), \
        w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc), w(0x34), w(0xa5), \
        w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15), w(0x04), \
        w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a), \
        w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), \
        w(0x75), w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), \
        w(0x5a), w(0xa0), w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), \
        w(0xe3), w(0x2f), w(0x84), w(0x53), w(0xd1), w(0x00), w(0xed), \
        w(0x20), w(0xfc), w(0xb1), w(0x5b), w(0x6a), w(0xcb), w(0xbe), \
        w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf), w(0xd0), w(0xef), \
        w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85), w(0x45), \
        w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8), \
        w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), \
        w(0xf5), w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), \
        w(0xf3), w(0xd2), w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), \
        w(0x97), w(0x44), w(0x17), w(0xc4), w(0xa7), w(0x7e), w(0x3d), \
        w(0x64), w(0x5d), w(0x19), w(0x73), w(0x60), w(0x81), w(0x4f), \
        w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88), w(0x46), w(0xee), \
        w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb), w(0xe0), \
        w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c), \
        w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), \
        w(0x79), w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), \
        w(0x4e), w(0xa9), w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), \
        w(0x7a), w(0xae), w(0x08), w(0xba), w(0x78), w(0x25), w(0x2e), \
        w(0x1c), w(0xa6), w(0xb4), w(0xc6), w(0xe8), w(0xdd), w(0x74), \
        w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a), w(0x70), w(0x3e), \
        w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e), w(0x61), \
        w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e), \
        w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), \
        w(0x94), w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), \
        w(0x28), w(0xdf), w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), \
        w(0xe6), w(0x42), w(0x68), w(0x41), w(0x99), w(0x2d), w(0x0f), \
        w(0xb0), w(0x54), w(0xbb), w(0x16)                             \
    }
/* clang-format on */

/* X Macro trick. See https://en.wikipedia.org/wiki/X_Macro */
#define SSE2NEON_AES_H0(x) (x)
static const uint8_t SSE2NEON_sbox[256] = SSE2NEON_AES_DATA(SSE2NEON_AES_H0);
#undef SSE2NEON_AES_H0

// In the absence of crypto extensions, implement aesenc using regular NEON
// intrinsics instead. See:
// https://www.workofard.com/2017/01/accelerated-aes-for-the-arm64-linux-kernel/
// https://www.workofard.com/2017/07/ghash-for-low-end-cores/ and
// https://github.com/ColinIanKing/linux-next-mirror/blob/b5f466091e130caaf0735976648f72bd5e09aa84/crypto/aegis128-neon-inner.c#L52
// for more information. Reproduced with permission of the author.
FORCE_INLINE __m128i _mm_aesenc_si128(__m128i EncBlock, __m128i RoundKey)
{
#if defined(__aarch64__)
    static const uint8_t shift_rows[] = {0x0, 0x5, 0xa, 0xf, 0x4, 0x9,
                                         0xe, 0x3, 0x8, 0xd, 0x2, 0x7,
                                         0xc, 0x1, 0x6, 0xb};
    static const uint8_t ror32by8[] = {0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
                                       0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc};

    uint8x16_t v;
    uint8x16_t w = vreinterpretq_u8_m128i(EncBlock);

    // shift rows
    w = vqtbl1q_u8(w, vld1q_u8(shift_rows));

    // sub bytes
    v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(SSE2NEON_sbox), w);
    v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(SSE2NEON_sbox + 0x40), w - 0x40);
    v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(SSE2NEON_sbox + 0x80), w - 0x80);
    v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(SSE2NEON_sbox + 0xc0), w - 0xc0);

    // mix columns
    w = (v << 1) ^ (uint8x16_t)(((int8x16_t) v >> 7) & 0x1b);
    w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
    w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));

    // add round key
    return vreinterpretq_m128i_u8(w) ^ RoundKey;
#else /* ARMv7-A NEON implementation */
#define SSE2NEON_AES_B2W(b0, b1, b2, b3)                                       \
    (((uint32_t)(b3) << 24) | ((uint32_t)(b2) << 16) | ((uint32_t)(b1) << 8) | \
     (b0))
#define SSE2NEON_AES_F2(x) ((x << 1) ^ (((x >> 7) & 1) * 0x011b /* WPOLY */))
#define SSE2NEON_AES_F3(x) (SSE2NEON_AES_F2(x) ^ x)
#define SSE2NEON_AES_U0(p) \
    SSE2NEON_AES_B2W(SSE2NEON_AES_F2(p), p, p, SSE2NEON_AES_F3(p))
#define SSE2NEON_AES_U1(p) \
    SSE2NEON_AES_B2W(SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p, p)
#define SSE2NEON_AES_U2(p) \
    SSE2NEON_AES_B2W(p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p)
#define SSE2NEON_AES_U3(p) \
    SSE2NEON_AES_B2W(p, p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p))
    static const uint32_t ALIGN_STRUCT(16) aes_table[4][256] = {
        SSE2NEON_AES_DATA(SSE2NEON_AES_U0),
        SSE2NEON_AES_DATA(SSE2NEON_AES_U1),
        SSE2NEON_AES_DATA(SSE2NEON_AES_U2),
        SSE2NEON_AES_DATA(SSE2NEON_AES_U3),
    };
#undef SSE2NEON_AES_B2W
#undef SSE2NEON_AES_F2
#undef SSE2NEON_AES_F3
#undef SSE2NEON_AES_U0
#undef SSE2NEON_AES_U1
#undef SSE2NEON_AES_U2
#undef SSE2NEON_AES_U3

    uint32_t x0 = _mm_cvtsi128_si32(EncBlock);
    uint32_t x1 = _mm_cvtsi128_si32(_mm_shuffle_epi32(EncBlock, 0x55));
    uint32_t x2 = _mm_cvtsi128_si32(_mm_shuffle_epi32(EncBlock, 0xAA));
    uint32_t x3 = _mm_cvtsi128_si32(_mm_shuffle_epi32(EncBlock, 0xFF));

    __m128i out = _mm_set_epi32(
        (aes_table[0][x3 & 0xff] ^ aes_table[1][(x0 >> 8) & 0xff] ^
         aes_table[2][(x1 >> 16) & 0xff] ^ aes_table[3][x2 >> 24]),
        (aes_table[0][x2 & 0xff] ^ aes_table[1][(x3 >> 8) & 0xff] ^
         aes_table[2][(x0 >> 16) & 0xff] ^ aes_table[3][x1 >> 24]),
        (aes_table[0][x1 & 0xff] ^ aes_table[1][(x2 >> 8) & 0xff] ^
         aes_table[2][(x3 >> 16) & 0xff] ^ aes_table[3][x0 >> 24]),
        (aes_table[0][x0 & 0xff] ^ aes_table[1][(x1 >> 8) & 0xff] ^
         aes_table[2][(x2 >> 16) & 0xff] ^ aes_table[3][x3 >> 24]));

    return _mm_xor_si128(out, RoundKey);
#endif
}

// Perform the last round of an AES encryption flow on data (state) in a using
// the round key in RoundKey, and store the result in dst.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aesenclast_si128
FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
{
    /* FIXME: optimize for NEON */
    uint8_t v[4][4] = {
        [0] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 0)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 5)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 10)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 15)]},
        [1] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 4)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 9)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 14)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 3)]},
        [2] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 8)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 13)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 2)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 7)]},
        [3] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 12)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 1)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 6)],
               SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 11)]},
    };
    for (int i = 0; i < 16; i++)
        vreinterpretq_nth_u8_m128i(a, i) =
            v[i / 4][i % 4] ^ vreinterpretq_nth_u8_m128i(RoundKey, i);
    return a;
}

// Emits the Advanced Encryption Standard (AES) instruction aeskeygenassist.
// This instruction generates a round key for AES encryption. See
// https://kazakov.life/2017/11/01/cryptocurrency-mining-on-ios-devices/
// for details.
//
// https://msdn.microsoft.com/en-us/library/cc714138(v=vs.120).aspx
FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i key, const int rcon)
{
    uint32_t X1 = _mm_cvtsi128_si32(_mm_shuffle_epi32(key, 0x55));
    uint32_t X3 = _mm_cvtsi128_si32(_mm_shuffle_epi32(key, 0xFF));
    for (int i = 0; i < 4; ++i) {
        ((uint8_t *) &X1)[i] = SSE2NEON_sbox[((uint8_t *) &X1)[i]];
        ((uint8_t *) &X3)[i] = SSE2NEON_sbox[((uint8_t *) &X3)[i]];
    }
    return _mm_set_epi32(((X3 >> 8) | (X3 << 24)) ^ rcon, X3,
                         ((X1 >> 8) | (X1 << 24)) ^ rcon, X1);
}
#undef SSE2NEON_AES_DATA

#else /* __ARM_FEATURE_CRYPTO */

// Implements equivalent of 'aesenc' by combining AESE (with an empty key) and
// AESMC and then manually applying the real key as an xor operation. This
// unfortunately means an additional xor op; the compiler should be able to
// optimize this away for repeated calls however. See
// https://blog.michaelbrase.com/2018/05/08/emulating-x86-aes-intrinsics-on-armv8-a
// for more details.
FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i b)
{
    return vreinterpretq_m128i_u8(
        vaesmcq_u8(vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))) ^
        vreinterpretq_u8_m128i(b));
}

// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aesenclast_si128
FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
{
    return _mm_xor_si128(vreinterpretq_m128i_u8(vaeseq_u8(
                             vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
                         RoundKey);
}

FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
{
    // AESE does ShiftRows and SubBytes on A
    uint8x16_t u8 = vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0));

    uint8x16_t dest = {
        // Undo ShiftRows step from AESE and extract X1 and X3
        u8[0x4], u8[0x1], u8[0xE], u8[0xB],  // SubBytes(X1)
        u8[0x1], u8[0xE], u8[0xB], u8[0x4],  // ROT(SubBytes(X1))
        u8[0xC], u8[0x9], u8[0x6], u8[0x3],  // SubBytes(X3)
        u8[0x9], u8[0x6], u8[0x3], u8[0xC],  // ROT(SubBytes(X3))
    };
    uint32x4_t r = {0, (unsigned) rcon, 0, (unsigned) rcon};
    return vreinterpretq_m128i_u8(dest) ^ vreinterpretq_m128i_u32(r);
}
#endif
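
// Usage sketch (illustrative only): one AES-128 encryption pass consists of
// nine _mm_aesenc_si128 rounds followed by a single _mm_aesenclast_si128,
// each with its own expanded round key (round_key[] and plaintext_block below
// are assumed to be provided by the caller).
//
//   __m128i state = plaintext_block;
//   state = _mm_xor_si128(state, round_key[0]);
//   for (int round = 1; round <= 9; round++)
//       state = _mm_aesenc_si128(state, round_key[round]);
//   state = _mm_aesenclast_si128(state, round_key[10]);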

/* Streaming Extensions */

// Guarantees that every preceding store is globally visible before any
// subsequent store.
// https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx
FORCE_INLINE void _mm_sfence(void)
{
    __sync_synchronize();
}

// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-
// point elements) from a into memory using a non-temporal memory hint.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_ps
FORCE_INLINE void _mm_stream_ps(float *p, __m128 a)
{
#if __has_builtin(__builtin_nontemporal_store)
    __builtin_nontemporal_store(a, (float32x4_t *) p);
#else
    vst1q_f32(p, vreinterpretq_f32_m128(a));
#endif
}
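
// Usage sketch (illustrative only): non-temporal stores are intended for
// large, write-only buffers that will not be re-read soon, followed by
// _mm_sfence to order the stores (dst and n are hypothetical; dst is assumed
// 16-byte aligned).
//
//   for (size_t i = 0; i + 4 <= n; i += 4)
//       _mm_stream_ps(&dst[i], _mm_set1_ps(0.0f));
//   _mm_sfence();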

// Stores the data in a to the address p without polluting the caches. If the
// cache line containing address p is already in the cache, the cache will be
// updated.
// https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx
FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a)
{
#if __has_builtin(__builtin_nontemporal_store)
    __builtin_nontemporal_store(a, p);
#else
    vst1q_s64((int64_t *) p, vreinterpretq_s64_m128i(a));
#endif
}

// Load 128-bits of integer data from memory into dst using a non-temporal
// memory hint. mem_addr must be aligned on a 16-byte boundary or a
// general-protection exception may be generated.
//
//   dst[127:0] := MEM[mem_addr+127:mem_addr]
//
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_load_si128
FORCE_INLINE __m128i _mm_stream_load_si128(__m128i *p)
{
#if __has_builtin(__builtin_nontemporal_load)
    return __builtin_nontemporal_load(p);
#else
    return vreinterpretq_m128i_s64(vld1q_s64((int64_t *) p));
#endif
}

// Cache line containing p is flushed and invalidated from all caches in the
// coherency domain.
// https://msdn.microsoft.com/en-us/library/ba08y07y(v=vs.100).aspx
FORCE_INLINE void _mm_clflush(void const *p)
{
    (void) p;
    // No user-space NEON/ARM equivalent is used here; this is a no-op.
}

// Allocate aligned blocks of memory.
// https://software.intel.com/en-us/
// cpp-compiler-developer-guide-and-reference-allocating-and-freeing-aligned-memory-blocks
FORCE_INLINE void *_mm_malloc(size_t size, size_t align)
{
    void *ptr;
    if (align == 1)
        return malloc(size);
    if (align == 2 || (sizeof(void *) == 8 && align == 4))
        align = sizeof(void *);
    if (!posix_memalign(&ptr, align, size))
        return ptr;
    return NULL;
}

// Free aligned memory that was allocated with _mm_malloc.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_free
FORCE_INLINE void _mm_free(void *addr)
{
    free(addr);
}
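
// Usage sketch (illustrative only): allocate a 16-byte aligned buffer suitable
// for aligned 128-bit loads/stores, then release it with _mm_free.
//
//   float *buf = (float *) _mm_malloc(256 * sizeof(float), 16);
//   if (buf) {
//       /* ... use buf with _mm_load_ps / _mm_store_ps ... */
//       _mm_free(buf);
//   }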

// Starting with the initial value in crc, accumulates a CRC32 value for
// unsigned 8-bit integer v.
// https://msdn.microsoft.com/en-us/library/bb514036(v=vs.100)
FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v)
{
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
    __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t"
                         : [c] "+r"(crc)
                         : [v] "r"(v));
#else
    crc ^= v;
    for (int bit = 0; bit < 8; bit++) {
        if (crc & 1)
            crc = (crc >> 1) ^ UINT32_C(0x82f63b78);
        else
            crc = (crc >> 1);
    }
#endif
    return crc;
}

// Starting with the initial value in crc, accumulates a CRC32 value for
// unsigned 16-bit integer v.
// https://msdn.microsoft.com/en-us/library/bb531411(v=vs.100)
FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v)
{
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
    __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t"
                         : [c] "+r"(crc)
                         : [v] "r"(v));
#else
    crc = _mm_crc32_u8(crc, v & 0xff);
    crc = _mm_crc32_u8(crc, (v >> 8) & 0xff);
#endif
    return crc;
}

// Starting with the initial value in crc, accumulates a CRC32 value for
// unsigned 32-bit integer v.
// https://msdn.microsoft.com/en-us/library/bb531394(v=vs.100)
FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v)
{
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
    __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t"
                         : [c] "+r"(crc)
                         : [v] "r"(v));
#else
    crc = _mm_crc32_u16(crc, v & 0xffff);
    crc = _mm_crc32_u16(crc, (v >> 16) & 0xffff);
#endif
    return crc;
}

// Starting with the initial value in crc, accumulates a CRC32 value for
// unsigned 64-bit integer v.
// https://msdn.microsoft.com/en-us/library/bb514033(v=vs.100)
FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v)
{
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
    __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t"
                         : [c] "+r"(crc)
                         : [v] "r"(v));
#else
    crc = _mm_crc32_u32((uint32_t)(crc), v & 0xffffffff);
    crc = _mm_crc32_u32((uint32_t)(crc), (v >> 32) & 0xffffffff);
#endif
    return crc;
}
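
// Usage sketch (illustrative only): accumulate a CRC32-C (Castagnoli
// polynomial, reflected form 0x82f63b78) checksum over a byte buffer, using
// the conventional all-ones seed and final inversion (buf and len are
// hypothetical).
//
//   uint32_t crc = 0xFFFFFFFF;
//   for (size_t i = 0; i < len; i++)
//       crc = _mm_crc32_u8(crc, buf[i]);
//   crc = ~crc;  // final XOR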

#if defined(__GNUC__) || defined(__clang__)
#pragma pop_macro("ALIGN_STRUCT")
#pragma pop_macro("FORCE_INLINE")
#endif

#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC pop_options
#endif

#endif