#ifndef SSE2NEON_H
#define SSE2NEON_H
// This header file provides a simple API translation layer
// between SSE intrinsics and their corresponding Arm/AArch64 NEON versions
//
// Contributors to this work are:
// John W. Ratcliff <[email protected]>
// Brandon Rowlett <[email protected]>
// Ken Fast <[email protected]>
// Eric van Beurden <[email protected]>
// Alexander Potylitsin <[email protected]>
// Hasindu Gamaarachchi <[email protected]>
// Jim Huang <[email protected]>
// Mark Cheng <[email protected]>
// Malcolm James MacLeod <[email protected]>
// Devin Hussey (easyaspi314) <[email protected]>
// Sebastian Pop <[email protected]>
// Developer Ecosystem Engineering <[email protected]>
// Danila Kutenin <[email protected]>
// François Turban (JishinMaster) <[email protected]>
// Pei-Hsuan Hung <[email protected]>
// Yang-Hao Yuan <[email protected]>
// Syoyo Fujita <[email protected]>
// Brecht Van Lommel <[email protected]>
// Jonathan Hue <[email protected]>
// Cuda Chen <[email protected]>
// Aymen Qader <[email protected]>
/*
 * sse2neon is freely redistributable under the MIT License.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Tunable configurations */
/* Enable precise implementations of math operations.
 * This slows the computation down a bit, but gives results consistent with
 * x86 SSE (e.g. it would fix a hole or a NaN pixel in a rendering result).
 */
/* _mm_min|max_ps|ss|pd|sd */
#ifndef SSE2NEON_PRECISE_MINMAX
#define SSE2NEON_PRECISE_MINMAX (0)
#endif
/* _mm_rcp_ps and _mm_div_ps */
#ifndef SSE2NEON_PRECISE_DIV
#define SSE2NEON_PRECISE_DIV (0)
#endif
/* _mm_sqrt_ps and _mm_rsqrt_ps */
#ifndef SSE2NEON_PRECISE_SQRT
#define SSE2NEON_PRECISE_SQRT (0)
#endif
/* _mm_dp_pd */
#ifndef SSE2NEON_PRECISE_DP
#define SSE2NEON_PRECISE_DP (0)
#endif
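/* Because each SSE2NEON_PRECISE_* knob above is guarded by #ifndef, callers can
 * opt into the precise (slower) code paths by defining the macro before this
 * header is included. An illustrative usage sketch (the particular knobs to
 * enable depend on the build):
 *
 *     #define SSE2NEON_PRECISE_MINMAX 1
 *     #define SSE2NEON_PRECISE_DIV 1
 *     #include "sse2neon.h"
 */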
/* compiler specific definitions */
#if defined(__GNUC__) || defined(__clang__)
#pragma push_macro("FORCE_INLINE")
#pragma push_macro("ALIGN_STRUCT")
#define FORCE_INLINE static inline __attribute__((always_inline))
#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
#define _sse2neon_likely(x) __builtin_expect(!!(x), 1)
#define _sse2neon_unlikely(x) __builtin_expect(!!(x), 0)
#else /* non-GNU / non-clang compilers */
#warning "Macro name collisions may happen with unsupported compiler."
#ifndef FORCE_INLINE
#define FORCE_INLINE static inline
#endif
#ifndef ALIGN_STRUCT
#define ALIGN_STRUCT(x) __declspec(align(x))
#endif
#define _sse2neon_likely(x) (x)
#define _sse2neon_unlikely(x) (x)
#endif
/* C language does not allow initializing a variable with a function call. */
#ifdef __cplusplus
#define _sse2neon_const static const
#else
#define _sse2neon_const const
#endif
#include <stdint.h>
#include <stdlib.h>
#if defined(_WIN32) && !defined(__MINGW32__)
/* Definitions for _mm_{malloc,free} are provided by <malloc.h>
 * from both MinGW-w64 and MSVC.
 */
#define SSE2NEON_ALLOC_DEFINED
#endif
/* If using MSVC */
#ifdef _MSC_VER
#include <intrin.h>
#if (defined(_M_AMD64) || defined(__x86_64__)) || \
    (defined(_M_ARM) || defined(__arm__))
#define SSE2NEON_HAS_BITSCAN64
#endif
#endif
/* Compiler barrier */
#define SSE2NEON_BARRIER()                     \
    do {                                       \
        __asm__ __volatile__("" ::: "memory"); \
        (void) 0;                              \
    } while (0)
/* Memory barriers
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics.
 */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
#include <stdatomic.h>
#endif
FORCE_INLINE void _sse2neon_smp_mb(void)
{
    SSE2NEON_BARRIER();
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
    !defined(__STDC_NO_ATOMICS__)
    atomic_thread_fence(memory_order_seq_cst);
#elif defined(__GNUC__) || defined(__clang__)
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
    /* FIXME: MSVC support */
#endif
}
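/* A minimal usage sketch: code that needs x86-style full-fence semantics on
 * Arm can call the helper directly. (Fence intrinsics such as _mm_mfence can
 * be built on top of it; whether the rest of this file does exactly that is
 * an assumption here.)
 *
 *     x = 1;              // store A
 *     _sse2neon_smp_mb(); // full barrier: A is ordered before B
 *     y = 1;              // store B
 */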
/* Architecture-specific build options */
/* FIXME: #pragma GCC push_options is only available on GCC */
#if defined(__GNUC__)
#if defined(__arm__) && __ARM_ARCH == 7
/* According to ARM C Language Extensions Architecture specification,
 * __ARM_NEON is defined to a value indicating the Advanced SIMD (NEON)
 * architecture supported.
 */
#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
#error "You must enable NEON instructions (e.g. -mfpu=neon) to use SSE2NEON."
#endif
#if !defined(__clang__)
#pragma GCC push_options
#pragma GCC target("fpu=neon")
#endif
#elif defined(__aarch64__)
#if !defined(__clang__)
#pragma GCC push_options
#pragma GCC target("+simd")
#endif
#elif __ARM_ARCH == 8
#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
#error \
    "You must enable NEON instructions (e.g. -mfpu=neon-fp-armv8) to use SSE2NEON."
#endif
#if !defined(__clang__)
#pragma GCC push_options
#endif
#else
#error "Unsupported target. Must be either ARMv7-A+NEON or ARMv8-A."
#endif
#endif
#include <arm_neon.h>
#if !defined(__aarch64__) && (__ARM_ARCH == 8)
#if defined __has_include && __has_include(<arm_acle.h>)
#include <arm_acle.h>
#endif
#endif
/* Apple Silicon cache lines are double the size of those commonly used by
 * Intel, AMD and other Arm microarchitectures.
 * From sysctl -a on Apple M1:
 * hw.cachelinesize: 128
 */
#if defined(__APPLE__) && (defined(__aarch64__) || defined(__arm64__))
#define SSE2NEON_CACHELINE_SIZE 128
#else
#define SSE2NEON_CACHELINE_SIZE 64
#endif
/* Rounding functions require either AArch64 instructions or a libm fallback */
#if !defined(__aarch64__)
#include <math.h>
#endif
/* On ARMv7, some registers, such as PMUSERENR and PMCCNTR, are read-only
 * or even not accessible in user mode.
 * To write to or access these registers in user mode, we have to perform a
 * syscall instead.
 */
#if !defined(__aarch64__)
#include <sys/time.h>
#endif
  197. /* "__has_builtin" can be used to query support for built-in functions
  198. * provided by gcc/clang and other compilers that support it.
  199. */
  200. #ifndef __has_builtin /* GCC prior to 10 or non-clang compilers */
  201. /* Compatibility with gcc <= 9 */
  202. #if defined(__GNUC__) && (__GNUC__ <= 9)
  203. #define __has_builtin(x) HAS##x
  204. #define HAS__builtin_popcount 1
  205. #define HAS__builtin_popcountll 1
  206. // __builtin_shuffle introduced in GCC 4.7.0
  207. #if (__GNUC__ >= 5) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))
  208. #define HAS__builtin_shuffle 1
  209. #else
  210. #define HAS__builtin_shuffle 0
  211. #endif
  212. #define HAS__builtin_shufflevector 0
  213. #define HAS__builtin_nontemporal_store 0
  214. #else
  215. #define __has_builtin(x) 0
  216. #endif
  217. #endif
/**
 * MACRO for the shuffle parameter of _mm_shuffle_ps().
 * Argument fp3 is a digit [0123] that represents the fp from argument "b"
 * of _mm_shuffle_ps that will be placed in fp3 of the result. fp2 is the same
 * for fp2 in the result. fp1 is a digit [0123] that represents the fp from
 * argument "a" of _mm_shuffle_ps that will be placed in fp1 of the result.
 * fp0 is the same for fp0 of the result.
 */
#if defined(__aarch64__)
#define _MN_SHUFFLE(fp3,fp2,fp1,fp0) ( (uint8x16_t){ (((fp3)*4)+0), (((fp3)*4)+1), (((fp3)*4)+2), (((fp3)*4)+3), (((fp2)*4)+0), (((fp2)*4)+1), (((fp2)*4)+\
2), (((fp2)*4)+3), (((fp1)*4)+0), (((fp1)*4)+1), (((fp1)*4)+2), (((fp1)*4)+3), (((fp0)*4)+0), (((fp0)*4)+1), (((fp0)*4)+2), (((fp0)*4)+3) } )
#define _MF_SHUFFLE(fp3,fp2,fp1,fp0) ( (uint8x16_t){ (((fp3)*4)+0), (((fp3)*4)+1), (((fp3)*4)+2), (((fp3)*4)+3), (((fp2)*4)+0), (((fp2)*4)+1), (((fp2)*4)+\
2), (((fp2)*4)+3), (((fp1)*4)+16+0), (((fp1)*4)+16+1), (((fp1)*4)+16+2), (((fp1)*4)+16+3), (((fp0)*4)+16+0), (((fp0)*4)+16+1), (((fp0)*4)+16+2), (((fp0)*\
4)+16+3) } )
#endif
#define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \
    (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
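/* For example, _MM_SHUFFLE(3, 2, 1, 0) packs the four 2-bit lane selectors
 * into a single immediate: (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4, the
 * "keep lanes in order" selector commonly passed to shuffle intrinsics.
 */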
#if __has_builtin(__builtin_shufflevector)
#define _sse2neon_shuffle(type, a, b, ...) \
    __builtin_shufflevector(a, b, __VA_ARGS__)
#elif __has_builtin(__builtin_shuffle)
#define _sse2neon_shuffle(type, a, b, ...) \
    __extension__({                        \
        type tmp = {__VA_ARGS__};          \
        __builtin_shuffle(a, b, tmp);      \
    })
#endif
#ifdef _sse2neon_shuffle
#define vshuffle_s16(a, b, ...) _sse2neon_shuffle(int16x4_t, a, b, __VA_ARGS__)
#define vshuffleq_s16(a, b, ...) _sse2neon_shuffle(int16x8_t, a, b, __VA_ARGS__)
#define vshuffle_s32(a, b, ...) _sse2neon_shuffle(int32x2_t, a, b, __VA_ARGS__)
#define vshuffleq_s32(a, b, ...) _sse2neon_shuffle(int32x4_t, a, b, __VA_ARGS__)
#define vshuffle_s64(a, b, ...) _sse2neon_shuffle(int64x1_t, a, b, __VA_ARGS__)
#define vshuffleq_s64(a, b, ...) _sse2neon_shuffle(int64x2_t, a, b, __VA_ARGS__)
#endif
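/* Both builtins use the same index convention: for two N-lane inputs, indices
 * 0..N-1 select lanes from a and N..2N-1 select lanes from b. An illustrative
 * sketch:
 *
 *     int32x4_t a = vdupq_n_s32(1);
 *     int32x4_t b = vdupq_n_s32(2);
 *     int32x4_t r = vshuffleq_s32(a, b, 0, 1, 4, 5);  // {1, 1, 2, 2}
 */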
/* Rounding mode macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04
#define _MM_FROUND_NO_EXC 0x08
#define _MM_FROUND_RAISE_EXC 0x00
#define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
#define _MM_ROUND_NEAREST 0x0000
#define _MM_ROUND_DOWN 0x2000
#define _MM_ROUND_UP 0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000
/* Flush zero mode macros. */
#define _MM_FLUSH_ZERO_MASK 0x8000
#define _MM_FLUSH_ZERO_ON 0x8000
#define _MM_FLUSH_ZERO_OFF 0x0000
/* Denormals are zeros mode macros. */
#define _MM_DENORMALS_ZERO_MASK 0x0040
#define _MM_DENORMALS_ZERO_ON 0x0040
#define _MM_DENORMALS_ZERO_OFF 0x0000
/* indicate immediate constant argument in a given range */
#define __constrange(a, b) const
/* A few intrinsics accept traditional data types like ints or floats, but
 * most operate on data types that are specific to SSE.
 * If a vector type ends in d, it contains doubles, and if it does not have
 * a suffix, it contains floats. An integer vector type can contain any type
 * of integer, from chars to shorts to unsigned long longs.
 */
typedef int64x1_t __m64;
typedef float32x4_t __m128; /* 128-bit vector containing 4 floats */
// On ARM 32-bit architecture, the float64x2_t is not supported.
// The data type __m128d should be represented in a different way for related
// intrinsic conversion.
#if defined(__aarch64__)
typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */
#else
typedef float32x4_t __m128d;
#endif
typedef int64x2_t __m128i; /* 128-bit vector containing integers */
// __int64 is defined in the Intrinsics Guide and maps to a different data type
// under different data models
#if !(defined(_WIN32) || defined(_WIN64) || defined(__int64))
#if (defined(__x86_64__) || defined(__i386__))
#define __int64 long long
#else
#define __int64 int64_t
#endif
#endif
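/* Since these are plain typedefs of NEON vector types, SSE-style values and
 * native NEON intrinsics can be mixed freely. An illustrative sketch:
 *
 *     __m128 ones = vdupq_n_f32(1.0f);                       // float32x4_t
 *     __m128i zero = vreinterpretq_s64_s32(vdupq_n_s32(0));  // int64x2_t
 */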
/* type-safe casting between types */
#define vreinterpretq_m128_f16(x) vreinterpretq_f32_f16(x)
#define vreinterpretq_m128_f32(x) (x)
#define vreinterpretq_m128_f64(x) vreinterpretq_f32_f64(x)
#define vreinterpretq_m128_u8(x) vreinterpretq_f32_u8(x)
#define vreinterpretq_m128_u16(x) vreinterpretq_f32_u16(x)
#define vreinterpretq_m128_u32(x) vreinterpretq_f32_u32(x)
#define vreinterpretq_m128_u64(x) vreinterpretq_f32_u64(x)
#define vreinterpretq_m128_s8(x) vreinterpretq_f32_s8(x)
#define vreinterpretq_m128_s16(x) vreinterpretq_f32_s16(x)
#define vreinterpretq_m128_s32(x) vreinterpretq_f32_s32(x)
#define vreinterpretq_m128_s64(x) vreinterpretq_f32_s64(x)
#define vreinterpretq_f16_m128(x) vreinterpretq_f16_f32(x)
#define vreinterpretq_f32_m128(x) (x)
#define vreinterpretq_f64_m128(x) vreinterpretq_f64_f32(x)
#define vreinterpretq_u8_m128(x) vreinterpretq_u8_f32(x)
#define vreinterpretq_u16_m128(x) vreinterpretq_u16_f32(x)
#define vreinterpretq_u32_m128(x) vreinterpretq_u32_f32(x)
#define vreinterpretq_u64_m128(x) vreinterpretq_u64_f32(x)
#define vreinterpretq_s8_m128(x) vreinterpretq_s8_f32(x)
#define vreinterpretq_s16_m128(x) vreinterpretq_s16_f32(x)
#define vreinterpretq_s32_m128(x) vreinterpretq_s32_f32(x)
#define vreinterpretq_s64_m128(x) vreinterpretq_s64_f32(x)
#define vreinterpretq_m128i_s8(x) vreinterpretq_s64_s8(x)
#define vreinterpretq_m128i_s16(x) vreinterpretq_s64_s16(x)
#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x)
#define vreinterpretq_m128i_s64(x) (x)
#define vreinterpretq_m128i_u8(x) vreinterpretq_s64_u8(x)
#define vreinterpretq_m128i_u16(x) vreinterpretq_s64_u16(x)
#define vreinterpretq_m128i_u32(x) vreinterpretq_s64_u32(x)
#define vreinterpretq_m128i_u64(x) vreinterpretq_s64_u64(x)
#define vreinterpretq_f32_m128i(x) vreinterpretq_f32_s64(x)
#define vreinterpretq_f64_m128i(x) vreinterpretq_f64_s64(x)
#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s64(x)
#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s64(x)
#define vreinterpretq_s32_m128i(x) vreinterpretq_s32_s64(x)
#define vreinterpretq_s64_m128i(x) (x)
#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s64(x)
#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s64(x)
#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s64(x)
#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s64(x)
#define vreinterpret_m64_s8(x) vreinterpret_s64_s8(x)
#define vreinterpret_m64_s16(x) vreinterpret_s64_s16(x)
#define vreinterpret_m64_s32(x) vreinterpret_s64_s32(x)
#define vreinterpret_m64_s64(x) (x)
#define vreinterpret_m64_u8(x) vreinterpret_s64_u8(x)
#define vreinterpret_m64_u16(x) vreinterpret_s64_u16(x)
#define vreinterpret_m64_u32(x) vreinterpret_s64_u32(x)
#define vreinterpret_m64_u64(x) vreinterpret_s64_u64(x)
#define vreinterpret_m64_f16(x) vreinterpret_s64_f16(x)
#define vreinterpret_m64_f32(x) vreinterpret_s64_f32(x)
#define vreinterpret_m64_f64(x) vreinterpret_s64_f64(x)
#define vreinterpret_u8_m64(x) vreinterpret_u8_s64(x)
#define vreinterpret_u16_m64(x) vreinterpret_u16_s64(x)
#define vreinterpret_u32_m64(x) vreinterpret_u32_s64(x)
#define vreinterpret_u64_m64(x) vreinterpret_u64_s64(x)
#define vreinterpret_s8_m64(x) vreinterpret_s8_s64(x)
#define vreinterpret_s16_m64(x) vreinterpret_s16_s64(x)
#define vreinterpret_s32_m64(x) vreinterpret_s32_s64(x)
#define vreinterpret_s64_m64(x) (x)
#define vreinterpret_f32_m64(x) vreinterpret_f32_s64(x)
#if defined(__aarch64__)
#define vreinterpretq_m128d_s32(x) vreinterpretq_f64_s32(x)
#define vreinterpretq_m128d_s64(x) vreinterpretq_f64_s64(x)
#define vreinterpretq_m128d_u64(x) vreinterpretq_f64_u64(x)
#define vreinterpretq_m128d_f32(x) vreinterpretq_f64_f32(x)
#define vreinterpretq_m128d_f64(x) (x)
#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f64(x)
#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f64(x)
#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f64(x)
#define vreinterpretq_f64_m128d(x) (x)
#define vreinterpretq_f32_m128d(x) vreinterpretq_f32_f64(x)
#else
#define vreinterpretq_m128d_s32(x) vreinterpretq_f32_s32(x)
#define vreinterpretq_m128d_s64(x) vreinterpretq_f32_s64(x)
#define vreinterpretq_m128d_u32(x) vreinterpretq_f32_u32(x)
#define vreinterpretq_m128d_u64(x) vreinterpretq_f32_u64(x)
#define vreinterpretq_m128d_f32(x) (x)
#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f32(x)
#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f32(x)
#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f32(x)
#define vreinterpretq_f32_m128d(x) (x)
#endif
390. // A union named 'SIMDVec' is defined in this header file and can be used by
391. // applications which attempt to access the contents of an __m128 struct
392. // directly. Note that accessing the __m128 struct directly is considered bad
393. // coding practice by Microsoft: @see:
394. // https://docs.microsoft.com/en-us/cpp/cpp/m128
395. //
396. // However, some legacy source code may try to access the contents of an __m128
397. // struct directly, so the developer can use SIMDVec as an alias for it. Any
398. // casting must be done manually by the developer, as you cannot cast or
399. // otherwise alias the base NEON data type for intrinsic operations.
  400. //
  401. // union intended to allow direct access to an __m128 variable using the names
  402. // that the MSVC compiler provides. This union should really only be used when
  403. // trying to access the members of the vector as integer values. GCC/clang
  404. // allow native access to the float members through a simple array access
  405. // operator (in C since 4.6, in C++ since 4.8).
  406. //
  407. // Ideally direct accesses to SIMD vectors should not be used since it can cause
  408. // a performance hit. If it really is needed however, the original __m128
  409. // variable can be aliased with a pointer to this union and used to access
  410. // individual components. The use of this union should be hidden behind a macro
  411. // that is used throughout the codebase to access the members instead of always
  412. // declaring this type of variable.
  413. typedef union ALIGN_STRUCT(16) SIMDVec {
  414. float m128_f32[4]; // as floats - DON'T USE. Added for convenience.
  415. int8_t m128_i8[16]; // as signed 8-bit integers.
  416. int16_t m128_i16[8]; // as signed 16-bit integers.
  417. int32_t m128_i32[4]; // as signed 32-bit integers.
  418. int64_t m128_i64[2]; // as signed 64-bit integers.
  419. uint8_t m128_u8[16]; // as unsigned 8-bit integers.
  420. uint16_t m128_u16[8]; // as unsigned 16-bit integers.
  421. uint32_t m128_u32[4]; // as unsigned 32-bit integers.
  422. uint64_t m128_u64[2]; // as unsigned 64-bit integers.
  423. } SIMDVec;
  424. // casting using SIMDVec
  425. #define vreinterpretq_nth_u64_m128i(x, n) (((SIMDVec *) &x)->m128_u64[n])
  426. #define vreinterpretq_nth_u32_m128i(x, n) (((SIMDVec *) &x)->m128_u32[n])
  427. #define vreinterpretq_nth_u8_m128i(x, n) (((SIMDVec *) &x)->m128_u8[n])
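// Illustrative sketch (not part of the original header): how the casting
// macros above can be used to read individual lanes of an __m128i. The
// variable names are hypothetical.
//   __m128i v = _mm_set_epi32(4, 3, 2, 1);
//   uint32_t lo = vreinterpretq_nth_u32_m128i(v, 0); // 1
//   uint32_t hi = vreinterpretq_nth_u32_m128i(v, 3); // 4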
  428. /* SSE macros */
  429. #define _MM_GET_FLUSH_ZERO_MODE _sse2neon_mm_get_flush_zero_mode
  430. #define _MM_SET_FLUSH_ZERO_MODE _sse2neon_mm_set_flush_zero_mode
  431. #define _MM_GET_DENORMALS_ZERO_MODE _sse2neon_mm_get_denormals_zero_mode
  432. #define _MM_SET_DENORMALS_ZERO_MODE _sse2neon_mm_set_denormals_zero_mode
  433. // Function declaration
  434. // SSE
  435. FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE();
  436. FORCE_INLINE __m128 _mm_move_ss(__m128, __m128);
  437. FORCE_INLINE __m128 _mm_or_ps(__m128, __m128);
  438. FORCE_INLINE __m128 _mm_set_ps1(float);
  439. FORCE_INLINE __m128 _mm_setzero_ps(void);
  440. // SSE2
  441. FORCE_INLINE __m128i _mm_and_si128(__m128i, __m128i);
  442. FORCE_INLINE __m128i _mm_castps_si128(__m128);
  443. FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i, __m128i);
  444. FORCE_INLINE __m128i _mm_cvtps_epi32(__m128);
  445. FORCE_INLINE __m128d _mm_move_sd(__m128d, __m128d);
  446. FORCE_INLINE __m128i _mm_or_si128(__m128i, __m128i);
  447. FORCE_INLINE __m128i _mm_set_epi32(int, int, int, int);
  448. FORCE_INLINE __m128i _mm_set_epi64x(int64_t, int64_t);
  449. FORCE_INLINE __m128d _mm_set_pd(double, double);
  450. FORCE_INLINE __m128i _mm_set1_epi32(int);
  451. FORCE_INLINE __m128i _mm_setzero_si128();
  452. // SSE4.1
  453. FORCE_INLINE __m128d _mm_ceil_pd(__m128d);
  454. FORCE_INLINE __m128 _mm_ceil_ps(__m128);
  455. FORCE_INLINE __m128d _mm_floor_pd(__m128d);
  456. FORCE_INLINE __m128 _mm_floor_ps(__m128);
  457. FORCE_INLINE __m128d _mm_round_pd(__m128d, int);
  458. FORCE_INLINE __m128 _mm_round_ps(__m128, int);
  459. // SSE4.2
  460. FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t, uint8_t);
  461. /* Backwards compatibility for compilers with lack of specific type support */
  462. // Older gcc does not define vld1q_u8_x4 type
  463. #if defined(__GNUC__) && !defined(__clang__) && \
  464. ((__GNUC__ <= 12 && defined(__arm__)) || \
  465. (__GNUC__ == 10 && __GNUC_MINOR__ < 3 && defined(__aarch64__)) || \
  466. (__GNUC__ <= 9 && defined(__aarch64__)))
  467. FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
  468. {
  469. uint8x16x4_t ret;
  470. ret.val[0] = vld1q_u8(p + 0);
  471. ret.val[1] = vld1q_u8(p + 16);
  472. ret.val[2] = vld1q_u8(p + 32);
  473. ret.val[3] = vld1q_u8(p + 48);
  474. return ret;
  475. }
  476. #else
  477. // Wraps vld1q_u8_x4
  478. FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
  479. {
  480. return vld1q_u8_x4(p);
  481. }
  482. #endif
  483. #if !defined(__aarch64__)
  484. /* emulate vaddv u8 variant */
  485. FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
  486. {
  487. const uint64x1_t v1 = vpaddl_u32(vpaddl_u16(vpaddl_u8(v8)));
  488. return vget_lane_u8(vreinterpret_u8_u64(v1), 0);
  489. }
  490. #else
  491. // Wraps vaddv_u8
  492. FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
  493. {
  494. return vaddv_u8(v8);
  495. }
  496. #endif
  497. #if !defined(__aarch64__)
  498. /* emulate vaddvq u8 variant */
  499. FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
  500. {
  501. uint8x8_t tmp = vpadd_u8(vget_low_u8(a), vget_high_u8(a));
  502. uint8_t res = 0;
  503. for (int i = 0; i < 8; ++i)
  504. res += tmp[i];
  505. return res;
  506. }
  507. #else
  508. // Wraps vaddvq_u8
  509. FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
  510. {
  511. return vaddvq_u8(a);
  512. }
  513. #endif
  514. #if !defined(__aarch64__)
  515. /* emulate vaddvq u16 variant */
  516. FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
  517. {
  518. uint32x4_t m = vpaddlq_u16(a);
  519. uint64x2_t n = vpaddlq_u32(m);
  520. uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);
  521. return vget_lane_u32((uint32x2_t) o, 0);
  522. }
  523. #else
  524. // Wraps vaddvq_u16
  525. FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
  526. {
  527. return vaddvq_u16(a);
  528. }
  529. #endif
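// Illustrative example (values chosen by hand): the helpers above perform a
// horizontal add across all lanes, e.g. summing the bytes 1..8 gives 36:
//   _sse2neon_vaddv_u8(vcreate_u8(0x0807060504030201)) == 36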
  530. /* Function Naming Conventions
  531. * The naming convention of SSE intrinsics is straightforward. A generic SSE
  532. * intrinsic function is given as follows:
  533. * _mm_<name>_<data_type>
  534. *
  535. * The parts of this format are given as follows:
  536. * 1. <name> describes the operation performed by the intrinsic
  537. * 2. <data_type> identifies the data type of the function's primary arguments
  538. *
  539. * This last part, <data_type>, is a little complicated. It identifies the
  540. * content of the input values, and can be set to any of the following values:
  541. * + ps - vectors contain floats (ps stands for packed single-precision)
542. * + pd - vectors contain doubles (pd stands for packed double-precision)
  543. * + epi8/epi16/epi32/epi64 - vectors contain 8-bit/16-bit/32-bit/64-bit
  544. * signed integers
  545. * + epu8/epu16/epu32/epu64 - vectors contain 8-bit/16-bit/32-bit/64-bit
  546. * unsigned integers
547. * + si128 - unspecified 128-bit integer vector
  548. * + m128/m128i/m128d - identifies input vector types when they are different
  549. * than the type of the returned vector
  550. *
  551. * For example, _mm_setzero_ps. The _mm implies that the function returns
  552. * a 128-bit vector. The _ps at the end implies that the argument vectors
  553. * contain floats.
  554. *
  555. * A complete example: Byte Shuffle - pshufb (_mm_shuffle_epi8)
556. * // Set packed 16-bit integers: 128 bits, 8 shorts, 16 bits each
557. * __m128i v_in = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
558. * // Set packed 8-bit integers:
559. * // 128 bits, 16 chars, 8 bits each
  560. * __m128i v_perm = _mm_setr_epi8(1, 0, 2, 3, 8, 9, 10, 11,
  561. * 4, 5, 12, 13, 6, 7, 14, 15);
  562. * // Shuffle packed 8-bit integers
  563. * __m128i v_out = _mm_shuffle_epi8(v_in, v_perm); // pshufb
  564. *
565. * Data (Number, Binary, Byte Index):
566. +------+------+------+------+------+------+------+------+
567. |      1      |      2      |      3      |      4      | Number
568. +------+------+------+------+------+------+------+------+
569. | 0000 | 0001 | 0000 | 0010 | 0000 | 0011 | 0000 | 0100 | Binary
570. +------+------+------+------+------+------+------+------+
571. |    0 |    1 |    2 |    3 |    4 |    5 |    6 |    7 | Index
572. +------+------+------+------+------+------+------+------+
573. +------+------+------+------+------+------+------+------+
574. |      5      |      6      |      7      |      8      | Number
575. +------+------+------+------+------+------+------+------+
576. | 0000 | 0101 | 0000 | 0110 | 0000 | 0111 | 0000 | 1000 | Binary
577. +------+------+------+------+------+------+------+------+
578. |    8 |    9 |   10 |   11 |   12 |   13 |   14 |   15 | Index
579. +------+------+------+------+------+------+------+------+
580. * Index (Byte Index):
581. +------+------+------+------+------+------+------+------+
582. |    1 |    0 |    2 |    3 |    8 |    9 |   10 |   11 |
583. +------+------+------+------+------+------+------+------+
584. +------+------+------+------+------+------+------+------+
585. |    4 |    5 |   12 |   13 |    6 |    7 |   14 |   15 |
586. +------+------+------+------+------+------+------+------+
587. * Result:
588. +------+------+------+------+------+------+------+------+
589. |    1 |    0 |    2 |    3 |    8 |    9 |   10 |   11 | Index
590. +------+------+------+------+------+------+------+------+
591. | 0001 | 0000 | 0000 | 0010 | 0000 | 0101 | 0000 | 0110 | Binary
592. +------+------+------+------+------+------+------+------+
593. |     256     |      2      |      5      |      6      | Number
594. +------+------+------+------+------+------+------+------+
595. +------+------+------+------+------+------+------+------+
596. |    4 |    5 |   12 |   13 |    6 |    7 |   14 |   15 | Index
597. +------+------+------+------+------+------+------+------+
598. | 0000 | 0011 | 0000 | 0111 | 0000 | 0100 | 0000 | 1000 | Binary
599. +------+------+------+------+------+------+------+------+
600. |      3      |      7      |      4      |      8      | Number
601. +------+------+------+------+------+------+------+------+
602. */
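// A minimal sketch (not part of the original header) showing how the byte
// shuffle example above can be verified; the array name is hypothetical and
// the expected values match the diagram:
//   int16_t out[8];
//   _mm_storeu_si128((__m128i *) out, v_out);
//   // out == {256, 2, 5, 6, 3, 7, 4, 8}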
  603. /* Constants for use with _mm_prefetch. */
  604. enum _mm_hint {
  605. _MM_HINT_NTA = 0, /* load data to L1 and L2 cache, mark it as NTA */
  606. _MM_HINT_T0 = 1, /* load data to L1 and L2 cache */
  607. _MM_HINT_T1 = 2, /* load data to L2 cache only */
  608. _MM_HINT_T2 = 3, /* load data to L2 cache only, mark it as NTA */
  609. };
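// Illustrative usage (assuming the _mm_prefetch implementation provided later
// in this header); 'ptr' is a hypothetical pointer to data about to be read:
//   _mm_prefetch((const char *) ptr, _MM_HINT_T0);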
610. // The bit field mapping to the FPCR (floating-point control register)
  611. typedef struct {
  612. uint16_t res0;
  613. uint8_t res1 : 6;
  614. uint8_t bit22 : 1;
  615. uint8_t bit23 : 1;
  616. uint8_t bit24 : 1;
  617. uint8_t res2 : 7;
  618. #if defined(__aarch64__)
  619. uint32_t res3;
  620. #endif
  621. } fpcr_bitfield;
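// Illustrative sketch: the flush-zero/denormals-zero helpers declared above
// read and modify FPCR through this bit field. Assuming the _MM_FLUSH_ZERO_ON
// constant defined elsewhere in this header:
//   _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
//   unsigned int mode = _MM_GET_FLUSH_ZERO_MODE(); // _MM_FLUSH_ZERO_ON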
  622. // Takes the upper 64 bits of a and places it in the low end of the result
  623. // Takes the lower 64 bits of b and places it into the high end of the result.
  624. FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
  625. {
  626. float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
  627. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
  628. return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
  629. }
630. // takes the lower two 32-bit values from a, swaps them, and places them in
631. // the low end of the result; takes the higher two 32-bit values from b, swaps
632. // them, and places them in the high end of the result.
  633. FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
  634. {
  635. float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
  636. float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
  637. return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
  638. }
  639. FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
  640. {
  641. float32x2_t a21 = vget_high_f32(
  642. vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
  643. float32x2_t b03 = vget_low_f32(
  644. vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
  645. return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
  646. }
  647. FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
  648. {
  649. float32x2_t a03 = vget_low_f32(
  650. vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
  651. float32x2_t b21 = vget_high_f32(
  652. vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
  653. return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
  654. }
  655. FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
  656. {
  657. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
  658. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
  659. return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
  660. }
  661. FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
  662. {
  663. float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
  664. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
  665. return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
  666. }
  667. FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
  668. {
  669. float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
  670. float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
  671. return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
  672. }
673. // keeps the low 64 bits of a in the low half of the result and puts the high
674. // 64 bits of b in the high half of the result
  675. FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
  676. {
  677. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
  678. float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
  679. return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
  680. }
  681. FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
  682. {
  683. float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
  684. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  685. return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
  686. }
  687. FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
  688. {
  689. float32x2_t a22 =
  690. vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
  691. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  692. return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
  693. }
  694. FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
  695. {
  696. float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
  697. float32x2_t b22 =
  698. vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
  699. return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
  700. }
  701. FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
  702. {
  703. float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  704. float32x2_t a22 =
  705. vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
  706. float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip ?*/
  707. float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
  708. return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
  709. }
  710. FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
  711. {
  712. float32x2_t a33 =
  713. vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
  714. float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
  715. return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
  716. }
  717. FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
  718. {
  719. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
  720. float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
  721. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  722. float32x2_t b20 = vset_lane_f32(b2, b00, 1);
  723. return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
  724. }
  725. FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
  726. {
  727. float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
728. float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
  729. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  730. float32x2_t b20 = vset_lane_f32(b2, b00, 1);
  731. return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
  732. }
  733. FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
  734. {
  735. float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
736. float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
  737. float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
  738. float32x2_t b20 = vset_lane_f32(b2, b00, 1);
  739. return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
  740. }
  741. // Kahan summation for accurate summation of floating-point numbers.
  742. // http://blog.zachbjornson.com/2019/08/11/fast-float-summation.html
  743. FORCE_INLINE void _sse2neon_kadd_f32(float *sum, float *c, float y)
  744. {
  745. y -= *c;
  746. float t = *sum + y;
  747. *c = (t - *sum) - y;
  748. *sum = t;
  749. }
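// Illustrative usage (hypothetical names): accumulating an array with reduced
// rounding error compared to a plain running sum:
//   float sum = 0.0f, comp = 0.0f;
//   for (size_t i = 0; i < n; i++)
//       _sse2neon_kadd_f32(&sum, &comp, data[i]);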
  750. #if defined(__ARM_FEATURE_CRYPTO) && \
  751. (defined(__aarch64__) || __has_builtin(__builtin_arm_crypto_vmullp64))
  752. // Wraps vmull_p64
  753. FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
  754. {
  755. poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
  756. poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
  757. return vreinterpretq_u64_p128(vmull_p64(a, b));
  758. }
  759. #else // ARMv7 polyfill
  760. // ARMv7/some A64 lacks vmull_p64, but it has vmull_p8.
  761. //
  762. // vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a
  763. // 64-bit->128-bit polynomial multiply.
  764. //
  765. // It needs some work and is somewhat slow, but it is still faster than all
  766. // known scalar methods.
  767. //
  768. // Algorithm adapted to C from
  769. // https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted
  770. // from "Fast Software Polynomial Multiplication on ARM Processors Using the
  771. // NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab
  772. // (https://hal.inria.fr/hal-01506572)
  773. static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
  774. {
  775. poly8x8_t a = vreinterpret_p8_u64(_a);
  776. poly8x8_t b = vreinterpret_p8_u64(_b);
  777. // Masks
  778. uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff),
  779. vcreate_u8(0x00000000ffffffff));
  780. uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff),
  781. vcreate_u8(0x0000000000000000));
  782. // Do the multiplies, rotating with vext to get all combinations
  783. uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b)); // D = A0 * B0
  784. uint8x16_t e =
  785. vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1))); // E = A0 * B1
  786. uint8x16_t f =
  787. vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b)); // F = A1 * B0
  788. uint8x16_t g =
  789. vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2))); // G = A0 * B2
  790. uint8x16_t h =
  791. vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b)); // H = A2 * B0
  792. uint8x16_t i =
  793. vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3))); // I = A0 * B3
  794. uint8x16_t j =
  795. vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b)); // J = A3 * B0
  796. uint8x16_t k =
797. vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4))); // K = A0 * B4
  798. // Add cross products
  799. uint8x16_t l = veorq_u8(e, f); // L = E + F
  800. uint8x16_t m = veorq_u8(g, h); // M = G + H
  801. uint8x16_t n = veorq_u8(i, j); // N = I + J
  802. // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL
  803. // instructions.
  804. #if defined(__aarch64__)
  805. uint8x16_t lm_p0 = vreinterpretq_u8_u64(
  806. vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
  807. uint8x16_t lm_p1 = vreinterpretq_u8_u64(
  808. vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
  809. uint8x16_t nk_p0 = vreinterpretq_u8_u64(
  810. vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
  811. uint8x16_t nk_p1 = vreinterpretq_u8_u64(
  812. vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
  813. #else
  814. uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m));
  815. uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m));
  816. uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k));
  817. uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k));
  818. #endif
  819. // t0 = (L) (P0 + P1) << 8
  820. // t1 = (M) (P2 + P3) << 16
  821. uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1);
  822. uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32);
  823. uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h);
  824. // t2 = (N) (P4 + P5) << 24
  825. // t3 = (K) (P6 + P7) << 32
  826. uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1);
  827. uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00);
  828. uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h);
  829. // De-interleave
  830. #if defined(__aarch64__)
  831. uint8x16_t t0 = vreinterpretq_u8_u64(
  832. vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
  833. uint8x16_t t1 = vreinterpretq_u8_u64(
  834. vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
  835. uint8x16_t t2 = vreinterpretq_u8_u64(
  836. vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
  837. uint8x16_t t3 = vreinterpretq_u8_u64(
  838. vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
  839. #else
  840. uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h));
  841. uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h));
  842. uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h));
  843. uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h));
  844. #endif
  845. // Shift the cross products
  846. uint8x16_t t0_shift = vextq_u8(t0, t0, 15); // t0 << 8
  847. uint8x16_t t1_shift = vextq_u8(t1, t1, 14); // t1 << 16
  848. uint8x16_t t2_shift = vextq_u8(t2, t2, 13); // t2 << 24
  849. uint8x16_t t3_shift = vextq_u8(t3, t3, 12); // t3 << 32
  850. // Accumulate the products
  851. uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift);
  852. uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift);
  853. uint8x16_t mix = veorq_u8(d, cross1);
  854. uint8x16_t r = veorq_u8(mix, cross2);
  855. return vreinterpretq_u64_u8(r);
  856. }
  857. #endif // ARMv7 polyfill
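// Illustrative check (values chosen by hand): carry-less multiplication by 2
// is a shift left by one in GF(2)[x], so
//   _sse2neon_vmull_p64(vcreate_u64(0x87), vcreate_u64(0x2))
// yields 0x10E in the low 64 bits of the result.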
  858. // C equivalent:
  859. // __m128i _mm_shuffle_epi32_default(__m128i a,
  860. // __constrange(0, 255) int imm) {
  861. // __m128i ret;
  862. // ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
  863. // ret[2] = a[(imm >> 4) & 0x03]; ret[3] = a[(imm >> 6) & 0x03];
  864. // return ret;
  865. // }
  866. #define _mm_shuffle_epi32_default(a, imm) \
  867. __extension__({ \
  868. int32x4_t ret; \
  869. ret = vmovq_n_s32( \
  870. vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm) & (0x3))); \
  871. ret = vsetq_lane_s32( \
  872. vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 2) & 0x3), \
  873. ret, 1); \
  874. ret = vsetq_lane_s32( \
  875. vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), \
  876. ret, 2); \
  877. ret = vsetq_lane_s32( \
  878. vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \
  879. ret, 3); \
  880. vreinterpretq_m128i_s32(ret); \
  881. })
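// Illustrative usage (assuming the _MM_SHUFFLE macro defined earlier in this
// header): _MM_SHUFFLE(3, 3, 0, 0) encodes imm = 0xF0, so
//   _mm_shuffle_epi32_default(a, _MM_SHUFFLE(3, 3, 0, 0))
// produces {a[0], a[0], a[3], a[3]}.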
  882. // Takes the upper 64 bits of a and places it in the low end of the result
  883. // Takes the lower 64 bits of a and places it into the high end of the result.
  884. FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
  885. {
  886. int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
  887. int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
  888. return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
  889. }
890. // takes the lower two 32-bit values from a, swaps them, and places them in
891. // the low end of the result; takes the higher two 32-bit values from a, swaps
892. // them, and places them in the high end of the result.
  893. FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
  894. {
  895. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  896. int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
  897. return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
  898. }
  899. // rotates the least significant 32 bits into the most significant 32 bits, and
  900. // shifts the rest down
  901. FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
  902. {
  903. return vreinterpretq_m128i_s32(
  904. vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1));
  905. }
  906. // rotates the most significant 32 bits into the least significant 32 bits, and
  907. // shifts the rest up
  908. FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a)
  909. {
  910. return vreinterpretq_m128i_s32(
  911. vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3));
  912. }
  913. // gets the lower 64 bits of a, and places it in the upper 64 bits
  914. // gets the lower 64 bits of a and places it in the lower 64 bits
  915. FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a)
  916. {
  917. int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
  918. return vreinterpretq_m128i_s32(vcombine_s32(a10, a10));
  919. }
  920. // gets the lower 64 bits of a, swaps the 0 and 1 elements, and places it in the
  921. // lower 64 bits gets the lower 64 bits of a, and places it in the upper 64 bits
  922. FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a)
  923. {
  924. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  925. int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
  926. return vreinterpretq_m128i_s32(vcombine_s32(a01, a10));
  927. }
  928. // gets the lower 64 bits of a, swaps the 0 and 1 elements and places it in the
  929. // upper 64 bits gets the lower 64 bits of a, swaps the 0 and 1 elements, and
  930. // places it in the lower 64 bits
  931. FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a)
  932. {
  933. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  934. return vreinterpretq_m128i_s32(vcombine_s32(a01, a01));
  935. }
  936. FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a)
  937. {
  938. int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1);
  939. int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
  940. return vreinterpretq_m128i_s32(vcombine_s32(a11, a22));
  941. }
  942. FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a)
  943. {
  944. int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
  945. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  946. return vreinterpretq_m128i_s32(vcombine_s32(a22, a01));
  947. }
  948. FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a)
  949. {
  950. int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
  951. int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1);
  952. return vreinterpretq_m128i_s32(vcombine_s32(a32, a33));
  953. }
  954. // FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a, __constrange(0,255)
  955. // int imm)
  956. #if defined(__aarch64__)
  957. #define _mm_shuffle_epi32_splat(a, imm) \
  958. __extension__({ \
  959. vreinterpretq_m128i_s32( \
  960. vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))); \
  961. })
  962. #else
  963. #define _mm_shuffle_epi32_splat(a, imm) \
  964. __extension__({ \
  965. vreinterpretq_m128i_s32( \
  966. vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))); \
  967. })
  968. #endif
969. // NEON does not support a general purpose permute intrinsic.
970. // Selects four specific single-precision, floating-point values from a and b,
971. // based on the mask imm.
  972. //
  973. // C equivalent:
  974. // __m128 _mm_shuffle_ps_default(__m128 a, __m128 b,
  975. // __constrange(0, 255) int imm) {
  976. // __m128 ret;
  977. // ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
  978. // ret[2] = b[(imm >> 4) & 0x03]; ret[3] = b[(imm >> 6) & 0x03];
  979. // return ret;
  980. // }
  981. //
  982. // https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx
  983. #define _mm_shuffle_ps_default(a, b, imm) \
  984. __extension__({ \
  985. float32x4_t ret; \
  986. ret = vmovq_n_f32( \
  987. vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))); \
  988. ret = vsetq_lane_f32( \
  989. vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \
  990. ret, 1); \
  991. ret = vsetq_lane_f32( \
  992. vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \
  993. ret, 2); \
  994. ret = vsetq_lane_f32( \
  995. vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \
  996. ret, 3); \
  997. vreinterpretq_m128_f32(ret); \
  998. })
  999. // Shuffles the lower 4 signed or unsigned 16-bit integers in a as specified
  1000. // by imm.
  1001. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/y41dkk37(v=vs.100)
  1002. // FORCE_INLINE __m128i _mm_shufflelo_epi16_function(__m128i a,
  1003. // __constrange(0,255) int
  1004. // imm)
  1005. #define _mm_shufflelo_epi16_function(a, imm) \
  1006. __extension__({ \
  1007. int16x8_t ret = vreinterpretq_s16_m128i(a); \
  1008. int16x4_t lowBits = vget_low_s16(ret); \
  1009. ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), ret, 0); \
  1010. ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, \
  1011. 1); \
  1012. ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 4) & 0x3), ret, \
  1013. 2); \
  1014. ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, \
  1015. 3); \
  1016. vreinterpretq_m128i_s16(ret); \
  1017. })
  1018. // Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified
  1019. // by imm.
  1020. // https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx
  1021. // FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a,
  1022. // __constrange(0,255) int
  1023. // imm)
  1024. #define _mm_shufflehi_epi16_function(a, imm) \
  1025. __extension__({ \
  1026. int16x8_t ret = vreinterpretq_s16_m128i(a); \
  1027. int16x4_t highBits = vget_high_s16(ret); \
  1028. ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), ret, 4); \
  1029. ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, \
  1030. 5); \
  1031. ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, \
  1032. 6); \
  1033. ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, \
  1034. 7); \
  1035. vreinterpretq_m128i_s16(ret); \
  1036. })
  1037. /* MMX */
1038. // _mm_empty is a no-op on ARM
  1039. FORCE_INLINE void _mm_empty(void) {}
  1040. /* SSE */
  1041. // Adds the four single-precision, floating-point values of a and b.
  1042. //
  1043. // r0 := a0 + b0
  1044. // r1 := a1 + b1
  1045. // r2 := a2 + b2
  1046. // r3 := a3 + b3
  1047. //
  1048. // https://msdn.microsoft.com/en-us/library/vstudio/c9848chc(v=vs.100).aspx
  1049. FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
  1050. {
  1051. return vreinterpretq_m128_f32(
  1052. vaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  1053. }
  1054. // adds the scalar single-precision floating point values of a and b.
  1055. // https://msdn.microsoft.com/en-us/library/be94x2y6(v=vs.100).aspx
  1056. FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b)
  1057. {
  1058. float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
  1059. float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
  1060. // the upper values in the result must be the remnants of <a>.
1061. return vreinterpretq_m128_f32(vaddq_f32(vreinterpretq_f32_m128(a), value));
  1062. }
  1063. // Computes the bitwise AND of the four single-precision, floating-point values
  1064. // of a and b.
  1065. //
  1066. // r0 := a0 & b0
  1067. // r1 := a1 & b1
  1068. // r2 := a2 & b2
  1069. // r3 := a3 & b3
  1070. //
  1071. // https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx
  1072. FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
  1073. {
  1074. return vreinterpretq_m128_s32(
  1075. vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
  1076. }
  1077. // Computes the bitwise AND-NOT of the four single-precision, floating-point
  1078. // values of a and b.
  1079. //
  1080. // r0 := ~a0 & b0
  1081. // r1 := ~a1 & b1
  1082. // r2 := ~a2 & b2
  1083. // r3 := ~a3 & b3
  1084. //
  1085. // https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx
  1086. FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
  1087. {
  1088. return vreinterpretq_m128_s32(
  1089. vbicq_s32(vreinterpretq_s32_m128(b),
  1090. vreinterpretq_s32_m128(a))); // *NOTE* argument swap
  1091. }
  1092. // Average packed unsigned 16-bit integers in a and b, and store the results in
  1093. // dst.
  1094. //
  1095. // FOR j := 0 to 3
  1096. // i := j*16
  1097. // dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
  1098. // ENDFOR
  1099. //
  1100. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_pu16
  1101. FORCE_INLINE __m64 _mm_avg_pu16(__m64 a, __m64 b)
  1102. {
  1103. return vreinterpret_m64_u16(
  1104. vrhadd_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)));
  1105. }
  1106. // Average packed unsigned 8-bit integers in a and b, and store the results in
  1107. // dst.
  1108. //
  1109. // FOR j := 0 to 7
  1110. // i := j*8
  1111. // dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
  1112. // ENDFOR
  1113. //
  1114. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_pu8
  1115. FORCE_INLINE __m64 _mm_avg_pu8(__m64 a, __m64 b)
  1116. {
  1117. return vreinterpret_m64_u8(
  1118. vrhadd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
  1119. }
  1120. // Compares for equality.
  1121. // https://msdn.microsoft.com/en-us/library/vstudio/36aectz5(v=vs.100).aspx
  1122. FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b)
  1123. {
  1124. return vreinterpretq_m128_u32(
  1125. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  1126. }
  1127. // Compares for equality.
  1128. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/k423z28e(v=vs.100)
  1129. FORCE_INLINE __m128 _mm_cmpeq_ss(__m128 a, __m128 b)
  1130. {
  1131. return _mm_move_ss(a, _mm_cmpeq_ps(a, b));
  1132. }
  1133. // Compares for greater than or equal.
  1134. // https://msdn.microsoft.com/en-us/library/vstudio/fs813y2t(v=vs.100).aspx
  1135. FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b)
  1136. {
  1137. return vreinterpretq_m128_u32(
  1138. vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  1139. }
  1140. // Compares for greater than or equal.
  1141. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/kesh3ddc(v=vs.100)
  1142. FORCE_INLINE __m128 _mm_cmpge_ss(__m128 a, __m128 b)
  1143. {
  1144. return _mm_move_ss(a, _mm_cmpge_ps(a, b));
  1145. }
  1146. // Compares for greater than.
  1147. //
  1148. // r0 := (a0 > b0) ? 0xffffffff : 0x0
  1149. // r1 := (a1 > b1) ? 0xffffffff : 0x0
  1150. // r2 := (a2 > b2) ? 0xffffffff : 0x0
  1151. // r3 := (a3 > b3) ? 0xffffffff : 0x0
  1152. //
  1153. // https://msdn.microsoft.com/en-us/library/vstudio/11dy102s(v=vs.100).aspx
  1154. FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b)
  1155. {
  1156. return vreinterpretq_m128_u32(
  1157. vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  1158. }
  1159. // Compares for greater than.
  1160. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/1xyyyy9e(v=vs.100)
  1161. FORCE_INLINE __m128 _mm_cmpgt_ss(__m128 a, __m128 b)
  1162. {
  1163. return _mm_move_ss(a, _mm_cmpgt_ps(a, b));
  1164. }
  1165. // Compares for less than or equal.
  1166. //
  1167. // r0 := (a0 <= b0) ? 0xffffffff : 0x0
  1168. // r1 := (a1 <= b1) ? 0xffffffff : 0x0
  1169. // r2 := (a2 <= b2) ? 0xffffffff : 0x0
  1170. // r3 := (a3 <= b3) ? 0xffffffff : 0x0
  1171. //
  1172. // https://msdn.microsoft.com/en-us/library/vstudio/1s75w83z(v=vs.100).aspx
  1173. FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b)
  1174. {
  1175. return vreinterpretq_m128_u32(
  1176. vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  1177. }
  1178. // Compares for less than or equal.
  1179. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/a7x0hbhw(v=vs.100)
  1180. FORCE_INLINE __m128 _mm_cmple_ss(__m128 a, __m128 b)
  1181. {
  1182. return _mm_move_ss(a, _mm_cmple_ps(a, b));
  1183. }
  1184. // Compares for less than
  1185. // https://msdn.microsoft.com/en-us/library/vstudio/f330yhc8(v=vs.100).aspx
  1186. FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b)
  1187. {
  1188. return vreinterpretq_m128_u32(
  1189. vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  1190. }
  1191. // Compares for less than
  1192. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fy94wye7(v=vs.100)
  1193. FORCE_INLINE __m128 _mm_cmplt_ss(__m128 a, __m128 b)
  1194. {
  1195. return _mm_move_ss(a, _mm_cmplt_ps(a, b));
  1196. }
  1197. // Compares for inequality.
  1198. // https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx
  1199. FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
  1200. {
  1201. return vreinterpretq_m128_u32(vmvnq_u32(
  1202. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
  1203. }
  1204. // Compares for inequality.
  1205. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/ekya8fh4(v=vs.100)
  1206. FORCE_INLINE __m128 _mm_cmpneq_ss(__m128 a, __m128 b)
  1207. {
  1208. return _mm_move_ss(a, _mm_cmpneq_ps(a, b));
  1209. }
  1210. // Compares for not greater than or equal.
  1211. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/wsexys62(v=vs.100)
  1212. FORCE_INLINE __m128 _mm_cmpnge_ps(__m128 a, __m128 b)
  1213. {
  1214. return vreinterpretq_m128_u32(vmvnq_u32(
  1215. vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
  1216. }
  1217. // Compares for not greater than or equal.
  1218. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fk2y80s8(v=vs.100)
  1219. FORCE_INLINE __m128 _mm_cmpnge_ss(__m128 a, __m128 b)
  1220. {
  1221. return _mm_move_ss(a, _mm_cmpnge_ps(a, b));
  1222. }
  1223. // Compares for not greater than.
  1224. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/d0xh7w0s(v=vs.100)
  1225. FORCE_INLINE __m128 _mm_cmpngt_ps(__m128 a, __m128 b)
  1226. {
  1227. return vreinterpretq_m128_u32(vmvnq_u32(
  1228. vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
  1229. }
  1230. // Compares for not greater than.
  1231. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100)
  1232. FORCE_INLINE __m128 _mm_cmpngt_ss(__m128 a, __m128 b)
  1233. {
  1234. return _mm_move_ss(a, _mm_cmpngt_ps(a, b));
  1235. }
  1236. // Compares for not less than or equal.
  1237. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/6a330kxw(v=vs.100)
  1238. FORCE_INLINE __m128 _mm_cmpnle_ps(__m128 a, __m128 b)
  1239. {
  1240. return vreinterpretq_m128_u32(vmvnq_u32(
  1241. vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
  1242. }
  1243. // Compares for not less than or equal.
  1244. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100)
  1245. FORCE_INLINE __m128 _mm_cmpnle_ss(__m128 a, __m128 b)
  1246. {
  1247. return _mm_move_ss(a, _mm_cmpnle_ps(a, b));
  1248. }
  1249. // Compares for not less than.
  1250. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/4686bbdw(v=vs.100)
  1251. FORCE_INLINE __m128 _mm_cmpnlt_ps(__m128 a, __m128 b)
  1252. {
  1253. return vreinterpretq_m128_u32(vmvnq_u32(
  1254. vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
  1255. }
  1256. // Compares for not less than.
  1257. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/56b9z2wf(v=vs.100)
  1258. FORCE_INLINE __m128 _mm_cmpnlt_ss(__m128 a, __m128 b)
  1259. {
  1260. return _mm_move_ss(a, _mm_cmpnlt_ps(a, b));
  1261. }
  1262. // Compares the four 32-bit floats in a and b to check if any values are NaN.
  1263. // Ordered compare between each value returns true for "orderable" and false for
  1264. // "not orderable" (NaN).
  1265. // https://msdn.microsoft.com/en-us/library/vstudio/0h9w00fx(v=vs.100).aspx see
  1266. // also:
  1267. // http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
  1268. // http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics
  1269. FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b)
  1270. {
  1271. // Note: NEON does not have ordered compare builtin
  1272. // Need to compare a eq a and b eq b to check for NaN
  1273. // Do AND of results to get final
  1274. uint32x4_t ceqaa =
  1275. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  1276. uint32x4_t ceqbb =
  1277. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  1278. return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb));
  1279. }
  1280. // Compares for ordered.
  1281. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/343t62da(v=vs.100)
  1282. FORCE_INLINE __m128 _mm_cmpord_ss(__m128 a, __m128 b)
  1283. {
  1284. return _mm_move_ss(a, _mm_cmpord_ps(a, b));
  1285. }
  1286. // Compares for unordered.
  1287. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/khy6fk1t(v=vs.100)
  1288. FORCE_INLINE __m128 _mm_cmpunord_ps(__m128 a, __m128 b)
  1289. {
  1290. uint32x4_t f32a =
  1291. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  1292. uint32x4_t f32b =
  1293. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  1294. return vreinterpretq_m128_u32(vmvnq_u32(vandq_u32(f32a, f32b)));
  1295. }
  1296. // Compares for unordered.
  1297. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/2as2387b(v=vs.100)
  1298. FORCE_INLINE __m128 _mm_cmpunord_ss(__m128 a, __m128 b)
  1299. {
  1300. return _mm_move_ss(a, _mm_cmpunord_ps(a, b));
  1301. }
  1302. // Compares the lower single-precision floating point scalar values of a and b
  1303. // using an equality operation. :
  1304. // https://msdn.microsoft.com/en-us/library/93yx2h2b(v=vs.100).aspx
  1305. FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b)
  1306. {
  1307. uint32x4_t a_eq_b =
  1308. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  1309. return vgetq_lane_u32(a_eq_b, 0) & 0x1;
  1310. }
  1311. // Compares the lower single-precision floating point scalar values of a and b
  1312. // using a greater than or equal operation. :
  1313. // https://msdn.microsoft.com/en-us/library/8t80des6(v=vs.100).aspx
  1314. FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b)
  1315. {
  1316. uint32x4_t a_ge_b =
  1317. vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  1318. return vgetq_lane_u32(a_ge_b, 0) & 0x1;
  1319. }
  1320. // Compares the lower single-precision floating point scalar values of a and b
  1321. // using a greater than operation. :
  1322. // https://msdn.microsoft.com/en-us/library/b0738e0t(v=vs.100).aspx
  1323. FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b)
  1324. {
  1325. uint32x4_t a_gt_b =
  1326. vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  1327. return vgetq_lane_u32(a_gt_b, 0) & 0x1;
  1328. }
  1329. // Compares the lower single-precision floating point scalar values of a and b
  1330. // using a less than or equal operation. :
  1331. // https://msdn.microsoft.com/en-us/library/1w4t7c57(v=vs.90).aspx
  1332. FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b)
  1333. {
  1334. uint32x4_t a_le_b =
  1335. vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  1336. return vgetq_lane_u32(a_le_b, 0) & 0x1;
  1337. }
  1338. // Compares the lower single-precision floating point scalar values of a and b
  1339. // using a less than operation. :
1340. // https://msdn.microsoft.com/en-us/library/2kwe606b(v=vs.90).aspx
1341. // Important note: the MSDN documentation is incorrect. If either of the
1342. // values is NaN, the docs say the result is one, but it actually returns zero.
  1343. FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b)
  1344. {
  1345. uint32x4_t a_lt_b =
  1346. vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  1347. return vgetq_lane_u32(a_lt_b, 0) & 0x1;
  1348. }
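// For example (assuming NAN from <math.h>):
//   _mm_comilt_ss(_mm_set_ps1(NAN), _mm_set_ps1(1.0f)) == 0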
  1349. // Compares the lower single-precision floating point scalar values of a and b
  1350. // using an inequality operation. :
  1351. // https://msdn.microsoft.com/en-us/library/bafh5e0a(v=vs.90).aspx
  1352. FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b)
  1353. {
  1354. return !_mm_comieq_ss(a, b);
  1355. }
  1356. // Convert packed signed 32-bit integers in b to packed single-precision
  1357. // (32-bit) floating-point elements, store the results in the lower 2 elements
  1358. // of dst, and copy the upper 2 packed elements from a to the upper elements of
  1359. // dst.
  1360. //
  1361. // dst[31:0] := Convert_Int32_To_FP32(b[31:0])
  1362. // dst[63:32] := Convert_Int32_To_FP32(b[63:32])
  1363. // dst[95:64] := a[95:64]
  1364. // dst[127:96] := a[127:96]
  1365. //
  1366. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_pi2ps
  1367. FORCE_INLINE __m128 _mm_cvt_pi2ps(__m128 a, __m64 b)
  1368. {
  1369. return vreinterpretq_m128_f32(
  1370. vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
  1371. vget_high_f32(vreinterpretq_f32_m128(a))));
  1372. }
  1373. // Convert packed single-precision (32-bit) floating-point elements in a to
  1374. // packed 32-bit integers, and store the results in dst.
  1375. //
  1376. // FOR j := 0 to 1
  1377. // i := 32*j
  1378. // dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
  1379. // ENDFOR
  1380. //
  1381. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ps2pi
  1382. FORCE_INLINE __m64 _mm_cvt_ps2pi(__m128 a)
  1383. {
  1384. #if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING)
  1385. return vreinterpret_m64_s32(
  1386. vget_low_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a)))));
  1387. #else
  1388. return vreinterpret_m64_s32(vcvt_s32_f32(vget_low_f32(
  1389. vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)))));
  1390. #endif
  1391. }
  1392. // Convert the signed 32-bit integer b to a single-precision (32-bit)
  1393. // floating-point element, store the result in the lower element of dst, and
  1394. // copy the upper 3 packed elements from a to the upper elements of dst.
  1395. //
  1396. // dst[31:0] := Convert_Int32_To_FP32(b[31:0])
  1397. // dst[127:32] := a[127:32]
  1398. //
  1399. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_si2ss
  1400. FORCE_INLINE __m128 _mm_cvt_si2ss(__m128 a, int b)
  1401. {
  1402. return vreinterpretq_m128_f32(
  1403. vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
  1404. }
  1405. // Convert the lower single-precision (32-bit) floating-point element in a to a
  1406. // 32-bit integer, and store the result in dst.
  1407. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ss2si
  1408. FORCE_INLINE int _mm_cvt_ss2si(__m128 a)
  1409. {
  1410. #if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING)
  1411. return vgetq_lane_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a))),
  1412. 0);
  1413. #else
  1414. float32_t data = vgetq_lane_f32(
  1415. vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0);
  1416. return (int32_t) data;
  1417. #endif
  1418. }
  1419. // Convert packed 16-bit integers in a to packed single-precision (32-bit)
  1420. // floating-point elements, and store the results in dst.
  1421. //
  1422. // FOR j := 0 to 3
  1423. // i := j*16
  1424. // m := j*32
  1425. // dst[m+31:m] := Convert_Int16_To_FP32(a[i+15:i])
  1426. // ENDFOR
  1427. //
  1428. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi16_ps
  1429. FORCE_INLINE __m128 _mm_cvtpi16_ps(__m64 a)
  1430. {
  1431. return vreinterpretq_m128_f32(
  1432. vcvtq_f32_s32(vmovl_s16(vreinterpret_s16_m64(a))));
  1433. }
  1434. // Convert packed 32-bit integers in b to packed single-precision (32-bit)
  1435. // floating-point elements, store the results in the lower 2 elements of dst,
  1436. // and copy the upper 2 packed elements from a to the upper elements of dst.
  1437. //
  1438. // dst[31:0] := Convert_Int32_To_FP32(b[31:0])
  1439. // dst[63:32] := Convert_Int32_To_FP32(b[63:32])
  1440. // dst[95:64] := a[95:64]
  1441. // dst[127:96] := a[127:96]
  1442. //
  1443. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32_ps
  1444. FORCE_INLINE __m128 _mm_cvtpi32_ps(__m128 a, __m64 b)
  1445. {
  1446. return vreinterpretq_m128_f32(
  1447. vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
  1448. vget_high_f32(vreinterpretq_f32_m128(a))));
  1449. }
  1450. // Convert packed signed 32-bit integers in a to packed single-precision
  1451. // (32-bit) floating-point elements, store the results in the lower 2 elements
  1452. // of dst, then convert the packed signed 32-bit integers in b to
  1453. // single-precision (32-bit) floating-point element, and store the results in
  1454. // the upper 2 elements of dst.
  1455. //
  1456. // dst[31:0] := Convert_Int32_To_FP32(a[31:0])
  1457. // dst[63:32] := Convert_Int32_To_FP32(a[63:32])
  1458. // dst[95:64] := Convert_Int32_To_FP32(b[31:0])
  1459. // dst[127:96] := Convert_Int32_To_FP32(b[63:32])
  1460. //
  1461. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32x2_ps
  1462. FORCE_INLINE __m128 _mm_cvtpi32x2_ps(__m64 a, __m64 b)
  1463. {
  1464. return vreinterpretq_m128_f32(vcvtq_f32_s32(
  1465. vcombine_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b))));
  1466. }
  1467. // Convert the lower packed 8-bit integers in a to packed single-precision
  1468. // (32-bit) floating-point elements, and store the results in dst.
  1469. //
  1470. // FOR j := 0 to 3
  1471. // i := j*8
  1472. // m := j*32
  1473. // dst[m+31:m] := Convert_Int8_To_FP32(a[i+7:i])
  1474. // ENDFOR
  1475. //
  1476. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi8_ps
  1477. FORCE_INLINE __m128 _mm_cvtpi8_ps(__m64 a)
  1478. {
  1479. return vreinterpretq_m128_f32(vcvtq_f32_s32(
  1480. vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_m64(a))))));
  1481. }
  1482. // Convert packed single-precision (32-bit) floating-point elements in a to
  1483. // packed 16-bit integers, and store the results in dst. Note: this intrinsic
  1484. // will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and
  1485. // 0x7FFFFFFF.
  1486. //
  1487. // FOR j := 0 to 3
  1488. // i := 16*j
  1489. // k := 32*j
  1490. // IF a[k+31:k] >= FP32(0x7FFF) && a[k+31:k] <= FP32(0x7FFFFFFF)
  1491. // dst[i+15:i] := 0x7FFF
  1492. // ELSE
  1493. // dst[i+15:i] := Convert_FP32_To_Int16(a[k+31:k])
  1494. // FI
  1495. // ENDFOR
  1496. //
  1497. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi16
  1498. FORCE_INLINE __m64 _mm_cvtps_pi16(__m128 a)
  1499. {
  1500. return vreinterpret_m64_s16(
  1501. vqmovn_s32(vreinterpretq_s32_m128i(_mm_cvtps_epi32(a))));
  1502. }
  1503. // Convert packed single-precision (32-bit) floating-point elements in a to
  1504. // packed 32-bit integers, and store the results in dst.
  1505. //
  1506. // FOR j := 0 to 1
  1507. // i := 32*j
  1508. // dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
  1509. // ENDFOR
  1510. //
  1511. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi32
  1512. #define _mm_cvtps_pi32(a) _mm_cvt_ps2pi(a)
  1513. // Convert packed single-precision (32-bit) floating-point elements in a to
  1514. // packed 8-bit integers, and store the results in lower 4 elements of dst.
  1515. // Note: this intrinsic will generate 0x7F, rather than 0x80, for input values
  1516. // between 0x7F and 0x7FFFFFFF.
  1517. //
  1518. // FOR j := 0 to 3
  1519. // i := 8*j
  1520. // k := 32*j
  1521. // IF a[k+31:k] >= FP32(0x7F) && a[k+31:k] <= FP32(0x7FFFFFFF)
  1522. // dst[i+7:i] := 0x7F
  1523. // ELSE
  1524. // dst[i+7:i] := Convert_FP32_To_Int8(a[k+31:k])
  1525. // FI
  1526. // ENDFOR
  1527. //
  1528. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi8
  1529. FORCE_INLINE __m64 _mm_cvtps_pi8(__m128 a)
  1530. {
  1531. return vreinterpret_m64_s8(vqmovn_s16(
  1532. vcombine_s16(vreinterpret_s16_m64(_mm_cvtps_pi16(a)), vdup_n_s16(0))));
  1533. }
  1534. // Convert packed unsigned 16-bit integers in a to packed single-precision
  1535. // (32-bit) floating-point elements, and store the results in dst.
  1536. //
  1537. // FOR j := 0 to 3
  1538. // i := j*16
  1539. // m := j*32
  1540. // dst[m+31:m] := Convert_UInt16_To_FP32(a[i+15:i])
  1541. // ENDFOR
  1542. //
  1543. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpu16_ps
  1544. FORCE_INLINE __m128 _mm_cvtpu16_ps(__m64 a)
  1545. {
  1546. return vreinterpretq_m128_f32(
  1547. vcvtq_f32_u32(vmovl_u16(vreinterpret_u16_m64(a))));
  1548. }
  1549. // Convert the lower packed unsigned 8-bit integers in a to packed
  1550. // single-precision (32-bit) floating-point elements, and store the results in
  1551. // dst.
  1552. //
  1553. // FOR j := 0 to 3
  1554. // i := j*8
  1555. // m := j*32
  1556. // dst[m+31:m] := Convert_UInt8_To_FP32(a[i+7:i])
  1557. // ENDFOR
  1558. //
  1559. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpu8_ps
  1560. FORCE_INLINE __m128 _mm_cvtpu8_ps(__m64 a)
  1561. {
  1562. return vreinterpretq_m128_f32(vcvtq_f32_u32(
  1563. vmovl_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_m64(a))))));
  1564. }
  1565. // Convert the signed 32-bit integer b to a single-precision (32-bit)
  1566. // floating-point element, store the result in the lower element of dst, and
  1567. // copy the upper 3 packed elements from a to the upper elements of dst.
  1568. //
  1569. // dst[31:0] := Convert_Int32_To_FP32(b[31:0])
  1570. // dst[127:32] := a[127:32]
  1571. //
  1572. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_ss
  1573. #define _mm_cvtsi32_ss(a, b) _mm_cvt_si2ss(a, b)
  1574. // Convert the signed 64-bit integer b to a single-precision (32-bit)
  1575. // floating-point element, store the result in the lower element of dst, and
  1576. // copy the upper 3 packed elements from a to the upper elements of dst.
  1577. //
  1578. // dst[31:0] := Convert_Int64_To_FP32(b[63:0])
  1579. // dst[127:32] := a[127:32]
  1580. //
  1581. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_ss
  1582. FORCE_INLINE __m128 _mm_cvtsi64_ss(__m128 a, int64_t b)
  1583. {
  1584. return vreinterpretq_m128_f32(
  1585. vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
  1586. }
  1587. // Copy the lower single-precision (32-bit) floating-point element of a to dst.
  1588. //
  1589. // dst[31:0] := a[31:0]
  1590. //
  1591. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_f32
  1592. FORCE_INLINE float _mm_cvtss_f32(__m128 a)
  1593. {
  1594. return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  1595. }
  1596. // Convert the lower single-precision (32-bit) floating-point element in a to a
  1597. // 32-bit integer, and store the result in dst.
  1598. //
  1599. // dst[31:0] := Convert_FP32_To_Int32(a[31:0])
  1600. //
  1601. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si32
  1602. #define _mm_cvtss_si32(a) _mm_cvt_ss2si(a)
  1603. // Convert the lower single-precision (32-bit) floating-point element in a to a
  1604. // 64-bit integer, and store the result in dst.
  1605. //
  1606. // dst[63:0] := Convert_FP32_To_Int64(a[31:0])
  1607. //
  1608. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si64
  1609. FORCE_INLINE int64_t _mm_cvtss_si64(__m128 a)
  1610. {
  1611. #if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING)
  1612. return (int64_t) vgetq_lane_f32(vrndiq_f32(vreinterpretq_f32_m128(a)), 0);
  1613. #else
  1614. float32_t data = vgetq_lane_f32(
  1615. vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0);
  1616. return (int64_t) data;
  1617. #endif
  1618. }
  1619. // Convert packed single-precision (32-bit) floating-point elements in a to
  1620. // packed 32-bit integers with truncation, and store the results in dst.
  1621. //
  1622. // FOR j := 0 to 1
  1623. // i := 32*j
  1624. // dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
  1625. // ENDFOR
  1626. //
  1627. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_ps2pi
  1628. FORCE_INLINE __m64 _mm_cvtt_ps2pi(__m128 a)
  1629. {
  1630. return vreinterpret_m64_s32(
  1631. vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a))));
  1632. }
  1633. // Convert the lower single-precision (32-bit) floating-point element in a to a
  1634. // 32-bit integer with truncation, and store the result in dst.
  1635. //
  1636. // dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
  1637. //
  1638. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_ss2si
  1639. FORCE_INLINE int _mm_cvtt_ss2si(__m128 a)
  1640. {
  1641. return vgetq_lane_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)), 0);
  1642. }
  1643. // Convert packed single-precision (32-bit) floating-point elements in a to
  1644. // packed 32-bit integers with truncation, and store the results in dst.
  1645. //
  1646. // FOR j := 0 to 1
  1647. // i := 32*j
  1648. // dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
  1649. // ENDFOR
  1650. //
  1651. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_pi32
  1652. #define _mm_cvttps_pi32(a) _mm_cvtt_ps2pi(a)
  1653. // Convert the lower single-precision (32-bit) floating-point element in a to a
  1654. // 32-bit integer with truncation, and store the result in dst.
  1655. //
  1656. // dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
  1657. //
  1658. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si32
  1659. #define _mm_cvttss_si32(a) _mm_cvtt_ss2si(a)
  1660. // Convert the lower single-precision (32-bit) floating-point element in a to a
  1661. // 64-bit integer with truncation, and store the result in dst.
  1662. //
  1663. // dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0])
  1664. //
  1665. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si64
  1666. FORCE_INLINE int64_t _mm_cvttss_si64(__m128 a)
  1667. {
  1668. return (int64_t) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  1669. }
  1670. // Divides the four single-precision, floating-point values of a and b.
  1671. //
  1672. // r0 := a0 / b0
  1673. // r1 := a1 / b1
  1674. // r2 := a2 / b2
  1675. // r3 := a3 / b3
  1676. //
  1677. // https://msdn.microsoft.com/en-us/library/edaw8147(v=vs.100).aspx
  1678. FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b)
  1679. {
  1680. #if defined(__aarch64__) && !SSE2NEON_PRECISE_DIV
  1681. return vreinterpretq_m128_f32(
  1682. vdivq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  1683. #else
  1684. float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(b));
  1685. recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
  1686. #if SSE2NEON_PRECISE_DIV
1687. // Additional Newton-Raphson iteration for accuracy
  1688. recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
  1689. #endif
  1690. return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), recip));
  1691. #endif
  1692. }
  1693. // Divides the scalar single-precision floating point value of a by b.
  1694. // https://msdn.microsoft.com/en-us/library/4y73xa49(v=vs.100).aspx
  1695. FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b)
  1696. {
  1697. float32_t value =
  1698. vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0);
  1699. return vreinterpretq_m128_f32(
  1700. vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
  1701. }
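// Usage sketch (illustrative addition; names are hypothetical): element-wise
// division with _mm_div_ps. The result is exact when the AArch64 vdivq_f32 path
// is taken and a Newton-Raphson approximation otherwise.
//
//   float a[4] = {1.0f, 2.0f, 3.0f, 4.0f}, b[4] = {2.0f, 2.0f, 2.0f, 2.0f}, r[4];
//   _mm_storeu_ps(r, _mm_div_ps(_mm_loadu_ps(a), _mm_loadu_ps(b)));
//   // r is approximately {0.5f, 1.0f, 1.5f, 2.0f}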
  1702. // Extract a 16-bit integer from a, selected with imm8, and store the result in
  1703. // the lower element of dst.
  1704. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_pi16
  1705. #define _mm_extract_pi16(a, imm) \
  1706. (int32_t) vget_lane_u16(vreinterpret_u16_m64(a), (imm))
  1707. // Free aligned memory that was allocated with _mm_malloc.
  1708. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_free
  1709. #if !defined(SSE2NEON_ALLOC_DEFINED)
  1710. FORCE_INLINE void _mm_free(void *addr)
  1711. {
  1712. #if defined(_WIN32)
  1713. _aligned_free(addr);
  1714. #else
  1715. free(addr);
  1716. #endif
  1717. }
  1718. #endif
  1719. // Macro: Get the flush zero bits from the MXCSR control and status register.
  1720. // The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or
  1721. // _MM_FLUSH_ZERO_OFF
  1722. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_FLUSH_ZERO_MODE
  1723. FORCE_INLINE unsigned int _sse2neon_mm_get_flush_zero_mode()
  1724. {
  1725. union {
  1726. fpcr_bitfield field;
  1727. #if defined(__aarch64__)
  1728. uint64_t value;
  1729. #else
  1730. uint32_t value;
  1731. #endif
  1732. } r;
  1733. #if defined(__aarch64__)
  1734. __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */
  1735. #else
  1736. __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
  1737. #endif
  1738. return r.field.bit24 ? _MM_FLUSH_ZERO_ON : _MM_FLUSH_ZERO_OFF;
  1739. }
  1740. // Macro: Get the rounding mode bits from the MXCSR control and status register.
  1741. // The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST,
  1742. // _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
  1743. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_ROUNDING_MODE
  1744. FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE()
  1745. {
  1746. union {
  1747. fpcr_bitfield field;
  1748. #if defined(__aarch64__)
  1749. uint64_t value;
  1750. #else
  1751. uint32_t value;
  1752. #endif
  1753. } r;
  1754. #if defined(__aarch64__)
  1755. __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */
  1756. #else
  1757. __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
  1758. #endif
  1759. if (r.field.bit22) {
  1760. return r.field.bit23 ? _MM_ROUND_TOWARD_ZERO : _MM_ROUND_UP;
  1761. } else {
  1762. return r.field.bit23 ? _MM_ROUND_DOWN : _MM_ROUND_NEAREST;
  1763. }
  1764. }
  1765. // Copy a to dst, and insert the 16-bit integer i into dst at the location
  1766. // specified by imm8.
  1767. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_pi16
  1768. #define _mm_insert_pi16(a, b, imm) \
  1769. __extension__({ \
  1770. vreinterpret_m64_s16( \
  1771. vset_lane_s16((b), vreinterpret_s16_m64(a), (imm))); \
  1772. })
  1773. // Loads four single-precision, floating-point values.
  1774. // https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx
  1775. FORCE_INLINE __m128 _mm_load_ps(const float *p)
  1776. {
  1777. return vreinterpretq_m128_f32(vld1q_f32(p));
  1778. }
  1779. // Load a single-precision (32-bit) floating-point element from memory into all
  1780. // elements of dst.
  1781. //
  1782. // dst[31:0] := MEM[mem_addr+31:mem_addr]
  1783. // dst[63:32] := MEM[mem_addr+31:mem_addr]
  1784. // dst[95:64] := MEM[mem_addr+31:mem_addr]
  1785. // dst[127:96] := MEM[mem_addr+31:mem_addr]
  1786. //
  1787. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ps1
  1788. #define _mm_load_ps1 _mm_load1_ps
1789. // Loads a single-precision, floating-point value into the low word and
  1790. // clears the upper three words.
  1791. // https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx
  1792. FORCE_INLINE __m128 _mm_load_ss(const float *p)
  1793. {
  1794. return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0));
  1795. }
  1796. // Loads a single single-precision, floating-point value, copying it into all
  1797. // four words
  1798. // https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx
  1799. FORCE_INLINE __m128 _mm_load1_ps(const float *p)
  1800. {
  1801. return vreinterpretq_m128_f32(vld1q_dup_f32(p));
  1802. }
  1803. // Sets the upper two single-precision, floating-point values with 64
  1804. // bits of data loaded from the address p; the lower two values are passed
  1805. // through from a.
  1806. //
  1807. // r0 := a0
  1808. // r1 := a1
  1809. // r2 := *p0
  1810. // r3 := *p1
  1811. //
  1812. // https://msdn.microsoft.com/en-us/library/w92wta0x(v%3dvs.100).aspx
  1813. FORCE_INLINE __m128 _mm_loadh_pi(__m128 a, __m64 const *p)
  1814. {
  1815. return vreinterpretq_m128_f32(
  1816. vcombine_f32(vget_low_f32(a), vld1_f32((const float32_t *) p)));
  1817. }
  1818. // Sets the lower two single-precision, floating-point values with 64
  1819. // bits of data loaded from the address p; the upper two values are passed
  1820. // through from a.
  1821. //
  1822. // Return Value
  1823. // r0 := *p0
  1824. // r1 := *p1
  1825. // r2 := a2
  1826. // r3 := a3
  1827. //
  1828. // https://msdn.microsoft.com/en-us/library/s57cyak2(v=vs.100).aspx
  1829. FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *p)
  1830. {
  1831. return vreinterpretq_m128_f32(
  1832. vcombine_f32(vld1_f32((const float32_t *) p), vget_high_f32(a)));
  1833. }
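// Usage sketch (illustrative addition; names are hypothetical): filling a
// __m128 from two separate 64-bit halves with _mm_loadl_pi/_mm_loadh_pi.
//
//   float lo[2] = {1.0f, 2.0f}, hi[2] = {3.0f, 4.0f};
//   __m128 v = _mm_setzero_ps();
//   v = _mm_loadl_pi(v, (const __m64 *) lo);
//   v = _mm_loadh_pi(v, (const __m64 *) hi);   // v holds {1.0f, 2.0f, 3.0f, 4.0f}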
  1834. // Load 4 single-precision (32-bit) floating-point elements from memory into dst
  1835. // in reverse order. mem_addr must be aligned on a 16-byte boundary or a
  1836. // general-protection exception may be generated.
  1837. //
  1838. // dst[31:0] := MEM[mem_addr+127:mem_addr+96]
  1839. // dst[63:32] := MEM[mem_addr+95:mem_addr+64]
  1840. // dst[95:64] := MEM[mem_addr+63:mem_addr+32]
  1841. // dst[127:96] := MEM[mem_addr+31:mem_addr]
  1842. //
  1843. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_ps
  1844. FORCE_INLINE __m128 _mm_loadr_ps(const float *p)
  1845. {
  1846. float32x4_t v = vrev64q_f32(vld1q_f32(p));
  1847. return vreinterpretq_m128_f32(vextq_f32(v, v, 2));
  1848. }
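// Usage sketch (illustrative addition; names are hypothetical): loading four
// floats in reverse order. ALIGN_STRUCT(16) keeps the array 16-byte aligned as
// the pseudocode above requires.
//
//   float ALIGN_STRUCT(16) v[4] = {1.0f, 2.0f, 3.0f, 4.0f};
//   __m128 rev = _mm_loadr_ps(v);   // lane 0 holds 4.0f, ..., lane 3 holds 1.0f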
  1849. // Loads four single-precision, floating-point values.
  1850. // https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx
  1851. FORCE_INLINE __m128 _mm_loadu_ps(const float *p)
  1852. {
1853. // For NEON, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are
1854. // equivalent.
  1855. return vreinterpretq_m128_f32(vld1q_f32(p));
  1856. }
  1857. // Load unaligned 16-bit integer from memory into the first element of dst.
  1858. //
  1859. // dst[15:0] := MEM[mem_addr+15:mem_addr]
  1860. // dst[MAX:16] := 0
  1861. //
  1862. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si16
  1863. FORCE_INLINE __m128i _mm_loadu_si16(const void *p)
  1864. {
  1865. return vreinterpretq_m128i_s16(
  1866. vsetq_lane_s16(*(const int16_t *) p, vdupq_n_s16(0), 0));
  1867. }
  1868. // Load unaligned 64-bit integer from memory into the first element of dst.
  1869. //
  1870. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  1871. // dst[MAX:64] := 0
  1872. //
  1873. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si64
  1874. FORCE_INLINE __m128i _mm_loadu_si64(const void *p)
  1875. {
  1876. return vreinterpretq_m128i_s64(
  1877. vcombine_s64(vld1_s64((const int64_t *) p), vdup_n_s64(0)));
  1878. }
  1879. // Allocate aligned blocks of memory.
  1880. // https://software.intel.com/en-us/
  1881. // cpp-compiler-developer-guide-and-reference-allocating-and-freeing-aligned-memory-blocks
  1882. #if !defined(SSE2NEON_ALLOC_DEFINED)
  1883. FORCE_INLINE void *_mm_malloc(size_t size, size_t align)
  1884. {
  1885. void *ptr;
  1886. if (align == 1)
  1887. return malloc(size);
  1888. if (align == 2 || (sizeof(void *) == 8 && align == 4))
  1889. align = sizeof(void *);
  1890. #if defined(_WIN32)
  1891. ptr = _aligned_malloc(size, align);
  1892. if (ptr)
  1893. return ptr;
  1894. #else
  1895. if (!posix_memalign(&ptr, align, size))
  1896. return ptr;
  1897. #endif
  1898. return NULL;
  1899. }
  1900. #endif
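// Usage sketch (illustrative addition; names are hypothetical): pairing
// _mm_malloc with _mm_free so that aligned stores such as _mm_store_ps are safe.
//
//   float *buf = (float *) _mm_malloc(64 * sizeof(float), 16);
//   if (buf) {
//       _mm_store_ps(buf, _mm_set1_ps(0.0f));   // 16-byte aligned store
//       _mm_free(buf);
//   }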
  1901. // Conditionally store 8-bit integer elements from a into memory using mask
  1902. // (elements are not stored when the highest bit is not set in the corresponding
  1903. // element) and a non-temporal memory hint.
  1904. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmove_si64
  1905. FORCE_INLINE void _mm_maskmove_si64(__m64 a, __m64 mask, char *mem_addr)
  1906. {
  1907. int8x8_t shr_mask = vshr_n_s8(vreinterpret_s8_m64(mask), 7);
  1908. __m128 b = _mm_load_ps((const float *) mem_addr);
  1909. int8x8_t masked =
  1910. vbsl_s8(vreinterpret_u8_s8(shr_mask), vreinterpret_s8_m64(a),
  1911. vreinterpret_s8_u64(vget_low_u64(vreinterpretq_u64_m128(b))));
  1912. vst1_s8((int8_t *) mem_addr, masked);
  1913. }
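// Usage sketch (illustrative addition; names are hypothetical, and it assumes
// <string.h> and <stdint.h>): storing only the bytes whose mask has the high
// bit set. The destination is padded to 16 bytes because this implementation
// reads 16 bytes from mem_addr before merging.
//
//   uint8_t d[8] = {1, 2, 3, 4, 5, 6, 7, 8};
//   uint8_t m[8] = {0x80, 0, 0x80, 0, 0, 0, 0, 0};   // keep bytes 0 and 2
//   char buf[16] = {0};
//   __m64 data, mask;
//   memcpy(&data, d, 8);
//   memcpy(&mask, m, 8);
//   _mm_maskmove_si64(data, mask, buf);   // buf[0] == 1, buf[2] == 3, rest 0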
  1914. // Conditionally store 8-bit integer elements from a into memory using mask
  1915. // (elements are not stored when the highest bit is not set in the corresponding
  1916. // element) and a non-temporal memory hint.
  1917. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_maskmovq
  1918. #define _m_maskmovq(a, mask, mem_addr) _mm_maskmove_si64(a, mask, mem_addr)
  1919. // Compare packed signed 16-bit integers in a and b, and store packed maximum
  1920. // values in dst.
  1921. //
  1922. // FOR j := 0 to 3
  1923. // i := j*16
  1924. // dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
  1925. // ENDFOR
  1926. //
  1927. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pi16
  1928. FORCE_INLINE __m64 _mm_max_pi16(__m64 a, __m64 b)
  1929. {
  1930. return vreinterpret_m64_s16(
  1931. vmax_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
  1932. }
  1933. // Computes the maximums of the four single-precision, floating-point values of
  1934. // a and b.
  1935. // https://msdn.microsoft.com/en-us/library/vstudio/ff5d607a(v=vs.100).aspx
  1936. FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b)
  1937. {
  1938. #if SSE2NEON_PRECISE_MINMAX
  1939. float32x4_t _a = vreinterpretq_f32_m128(a);
  1940. float32x4_t _b = vreinterpretq_f32_m128(b);
  1941. return vreinterpretq_m128_f32(vbslq_f32(vcgtq_f32(_a, _b), _a, _b));
  1942. #else
  1943. return vreinterpretq_m128_f32(
  1944. vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  1945. #endif
  1946. }
  1947. // Compare packed unsigned 8-bit integers in a and b, and store packed maximum
  1948. // values in dst.
  1949. //
  1950. // FOR j := 0 to 7
  1951. // i := j*8
  1952. // dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
  1953. // ENDFOR
  1954. //
  1955. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pu8
  1956. FORCE_INLINE __m64 _mm_max_pu8(__m64 a, __m64 b)
  1957. {
  1958. return vreinterpret_m64_u8(
  1959. vmax_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
  1960. }
  1961. // Computes the maximum of the two lower scalar single-precision floating point
  1962. // values of a and b.
  1963. // https://msdn.microsoft.com/en-us/library/s6db5esz(v=vs.100).aspx
  1964. FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b)
  1965. {
  1966. float32_t value = vgetq_lane_f32(_mm_max_ps(a, b), 0);
  1967. return vreinterpretq_m128_f32(
  1968. vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
  1969. }
  1970. // Compare packed signed 16-bit integers in a and b, and store packed minimum
  1971. // values in dst.
  1972. //
  1973. // FOR j := 0 to 3
  1974. // i := j*16
  1975. // dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
  1976. // ENDFOR
  1977. //
  1978. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pi16
  1979. FORCE_INLINE __m64 _mm_min_pi16(__m64 a, __m64 b)
  1980. {
  1981. return vreinterpret_m64_s16(
  1982. vmin_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
  1983. }
  1984. // Computes the minima of the four single-precision, floating-point values of a
  1985. // and b.
  1986. // https://msdn.microsoft.com/en-us/library/vstudio/wh13kadz(v=vs.100).aspx
  1987. FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b)
  1988. {
  1989. #if SSE2NEON_PRECISE_MINMAX
  1990. float32x4_t _a = vreinterpretq_f32_m128(a);
  1991. float32x4_t _b = vreinterpretq_f32_m128(b);
  1992. return vreinterpretq_m128_f32(vbslq_f32(vcltq_f32(_a, _b), _a, _b));
  1993. #else
  1994. return vreinterpretq_m128_f32(
  1995. vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  1996. #endif
  1997. }
  1998. // Compare packed unsigned 8-bit integers in a and b, and store packed minimum
  1999. // values in dst.
  2000. //
  2001. // FOR j := 0 to 7
  2002. // i := j*8
  2003. // dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
  2004. // ENDFOR
  2005. //
  2006. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pu8
  2007. FORCE_INLINE __m64 _mm_min_pu8(__m64 a, __m64 b)
  2008. {
  2009. return vreinterpret_m64_u8(
  2010. vmin_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
  2011. }
  2012. // Computes the minimum of the two lower scalar single-precision floating point
  2013. // values of a and b.
  2014. // https://msdn.microsoft.com/en-us/library/0a9y7xaa(v=vs.100).aspx
  2015. FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b)
  2016. {
  2017. float32_t value = vgetq_lane_f32(_mm_min_ps(a, b), 0);
  2018. return vreinterpretq_m128_f32(
  2019. vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
  2020. }
  2021. // Sets the low word to the single-precision, floating-point value of b
  2022. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/35hdzazd(v=vs.100)
  2023. FORCE_INLINE __m128 _mm_move_ss(__m128 a, __m128 b)
  2024. {
  2025. return vreinterpretq_m128_f32(
  2026. vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), 0),
  2027. vreinterpretq_f32_m128(a), 0));
  2028. }
  2029. // Moves the upper two values of B into the lower two values of A.
  2030. //
  2031. // r3 := a3
  2032. // r2 := a2
  2033. // r1 := b3
  2034. // r0 := b2
  2035. FORCE_INLINE __m128 _mm_movehl_ps(__m128 __A, __m128 __B)
  2036. {
  2037. float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(__A));
  2038. float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(__B));
  2039. return vreinterpretq_m128_f32(vcombine_f32(b32, a32));
  2040. }
  2041. // Moves the lower two values of B into the upper two values of A.
  2042. //
  2043. // r3 := b1
  2044. // r2 := b0
  2045. // r1 := a1
  2046. // r0 := a0
  2047. FORCE_INLINE __m128 _mm_movelh_ps(__m128 __A, __m128 __B)
  2048. {
  2049. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(__A));
  2050. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(__B));
  2051. return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
  2052. }
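// Usage sketch (illustrative addition; names are hypothetical): merging the low
// halves of two vectors with _mm_movelh_ps.
//
//   __m128 lo = _mm_setr_ps(1.0f, 2.0f, 0.0f, 0.0f);
//   __m128 hi = _mm_setr_ps(3.0f, 4.0f, 0.0f, 0.0f);
//   __m128 r = _mm_movelh_ps(lo, hi);   // r holds {1.0f, 2.0f, 3.0f, 4.0f}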
  2053. // Create mask from the most significant bit of each 8-bit element in a, and
  2054. // store the result in dst.
  2055. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_pi8
  2056. FORCE_INLINE int _mm_movemask_pi8(__m64 a)
  2057. {
  2058. uint8x8_t input = vreinterpret_u8_m64(a);
  2059. #if defined(__aarch64__)
  2060. static const int8x8_t shift = {0, 1, 2, 3, 4, 5, 6, 7};
  2061. uint8x8_t tmp = vshr_n_u8(input, 7);
  2062. return vaddv_u8(vshl_u8(tmp, shift));
  2063. #else
  2064. // Refer the implementation of `_mm_movemask_epi8`
  2065. uint16x4_t high_bits = vreinterpret_u16_u8(vshr_n_u8(input, 7));
  2066. uint32x2_t paired16 =
  2067. vreinterpret_u32_u16(vsra_n_u16(high_bits, high_bits, 7));
  2068. uint8x8_t paired32 =
  2069. vreinterpret_u8_u32(vsra_n_u32(paired16, paired16, 14));
  2070. return vget_lane_u8(paired32, 0) | ((int) vget_lane_u8(paired32, 4) << 4);
  2071. #endif
  2072. }
  2073. // NEON does not provide this method
  2074. // Creates a 4-bit mask from the most significant bits of the four
  2075. // single-precision, floating-point values.
  2076. // https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx
  2077. FORCE_INLINE int _mm_movemask_ps(__m128 a)
  2078. {
  2079. uint32x4_t input = vreinterpretq_u32_m128(a);
  2080. #if defined(__aarch64__)
  2081. static const int32x4_t shift = {0, 1, 2, 3};
  2082. uint32x4_t tmp = vshrq_n_u32(input, 31);
  2083. return vaddvq_u32(vshlq_u32(tmp, shift));
  2084. #else
  2085. // Uses the exact same method as _mm_movemask_epi8, see that for details.
  2086. // Shift out everything but the sign bits with a 32-bit unsigned shift
  2087. // right.
  2088. uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(input, 31));
  2089. // Merge the two pairs together with a 64-bit unsigned shift right + add.
  2090. uint8x16_t paired =
  2091. vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
  2092. // Extract the result.
  2093. return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
  2094. #endif
  2095. }
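// Usage sketch (illustrative addition; names are hypothetical): collecting the
// sign bits of a vector into an integer mask.
//
//   __m128 x = _mm_set_ps(-1.0f, 2.0f, -3.0f, 4.0f);   // lanes 3..0
//   int mask = _mm_movemask_ps(x);
//   // bit i reflects the sign bit of lane i, so mask == 0xA here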
  2096. // Multiplies the four single-precision, floating-point values of a and b.
  2097. //
  2098. // r0 := a0 * b0
  2099. // r1 := a1 * b1
  2100. // r2 := a2 * b2
  2101. // r3 := a3 * b3
  2102. //
  2103. // https://msdn.microsoft.com/en-us/library/vstudio/22kbk6t9(v=vs.100).aspx
  2104. FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
  2105. {
  2106. return vreinterpretq_m128_f32(
  2107. vmulq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  2108. }
  2109. // Multiply the lower single-precision (32-bit) floating-point element in a and
  2110. // b, store the result in the lower element of dst, and copy the upper 3 packed
  2111. // elements from a to the upper elements of dst.
  2112. //
  2113. // dst[31:0] := a[31:0] * b[31:0]
  2114. // dst[127:32] := a[127:32]
  2115. //
  2116. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_ss
  2117. FORCE_INLINE __m128 _mm_mul_ss(__m128 a, __m128 b)
  2118. {
  2119. return _mm_move_ss(a, _mm_mul_ps(a, b));
  2120. }
  2121. // Multiply the packed unsigned 16-bit integers in a and b, producing
  2122. // intermediate 32-bit integers, and store the high 16 bits of the intermediate
  2123. // integers in dst.
  2124. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_pu16
  2125. FORCE_INLINE __m64 _mm_mulhi_pu16(__m64 a, __m64 b)
  2126. {
  2127. return vreinterpret_m64_u16(vshrn_n_u32(
  2128. vmull_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)), 16));
  2129. }
  2130. // Computes the bitwise OR of the four single-precision, floating-point values
  2131. // of a and b.
  2132. // https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx
  2133. FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
  2134. {
  2135. return vreinterpretq_m128_s32(
  2136. vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
  2137. }
  2138. // Average packed unsigned 8-bit integers in a and b, and store the results in
  2139. // dst.
  2140. //
  2141. // FOR j := 0 to 7
  2142. // i := j*8
  2143. // dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
  2144. // ENDFOR
  2145. //
  2146. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pavgb
  2147. #define _m_pavgb(a, b) _mm_avg_pu8(a, b)
  2148. // Average packed unsigned 16-bit integers in a and b, and store the results in
  2149. // dst.
  2150. //
  2151. // FOR j := 0 to 3
  2152. // i := j*16
  2153. // dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
  2154. // ENDFOR
  2155. //
  2156. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pavgw
  2157. #define _m_pavgw(a, b) _mm_avg_pu16(a, b)
  2158. // Extract a 16-bit integer from a, selected with imm8, and store the result in
  2159. // the lower element of dst.
  2160. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pextrw
  2161. #define _m_pextrw(a, imm) _mm_extract_pi16(a, imm)
  2162. // Copy a to dst, and insert the 16-bit integer i into dst at the location
  2163. // specified by imm8.
  2164. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=m_pinsrw
  2165. #define _m_pinsrw(a, i, imm) _mm_insert_pi16(a, i, imm)
  2166. // Compare packed signed 16-bit integers in a and b, and store packed maximum
  2167. // values in dst.
  2168. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmaxsw
  2169. #define _m_pmaxsw(a, b) _mm_max_pi16(a, b)
  2170. // Compare packed unsigned 8-bit integers in a and b, and store packed maximum
  2171. // values in dst.
  2172. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmaxub
  2173. #define _m_pmaxub(a, b) _mm_max_pu8(a, b)
  2174. // Compare packed signed 16-bit integers in a and b, and store packed minimum
  2175. // values in dst.
  2176. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pminsw
  2177. #define _m_pminsw(a, b) _mm_min_pi16(a, b)
  2178. // Compare packed unsigned 8-bit integers in a and b, and store packed minimum
  2179. // values in dst.
  2180. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pminub
  2181. #define _m_pminub(a, b) _mm_min_pu8(a, b)
  2182. // Create mask from the most significant bit of each 8-bit element in a, and
  2183. // store the result in dst.
  2184. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmovmskb
  2185. #define _m_pmovmskb(a) _mm_movemask_pi8(a)
  2186. // Multiply the packed unsigned 16-bit integers in a and b, producing
  2187. // intermediate 32-bit integers, and store the high 16 bits of the intermediate
  2188. // integers in dst.
  2189. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmulhuw
  2190. #define _m_pmulhuw(a, b) _mm_mulhi_pu16(a, b)
  2191. // Fetch the line of data from memory that contains address p to a location in
2192. // the cache hierarchy specified by the locality hint i.
  2193. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_prefetch
  2194. FORCE_INLINE void _mm_prefetch(char const *p, int i)
  2195. {
  2196. switch (i) {
  2197. case _MM_HINT_NTA:
  2198. __builtin_prefetch(p, 0, 0);
  2199. break;
  2200. case _MM_HINT_T0:
  2201. __builtin_prefetch(p, 0, 3);
  2202. break;
  2203. case _MM_HINT_T1:
  2204. __builtin_prefetch(p, 0, 2);
  2205. break;
  2206. case _MM_HINT_T2:
  2207. __builtin_prefetch(p, 0, 1);
  2208. break;
  2209. }
  2210. }
  2211. // Compute the absolute differences of packed unsigned 8-bit integers in a and
  2212. // b, then horizontally sum each consecutive 8 differences to produce four
  2213. // unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
  2214. // 16 bits of dst.
  2215. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=m_psadbw
  2216. #define _m_psadbw(a, b) _mm_sad_pu8(a, b)
  2217. // Shuffle 16-bit integers in a using the control in imm8, and store the results
  2218. // in dst.
  2219. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pshufw
  2220. #define _m_pshufw(a, imm) _mm_shuffle_pi16(a, imm)
  2221. // Compute the approximate reciprocal of packed single-precision (32-bit)
  2222. // floating-point elements in a, and store the results in dst. The maximum
  2223. // relative error for this approximation is less than 1.5*2^-12.
  2224. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ps
  2225. FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
  2226. {
  2227. float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
  2228. recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
  2229. #if SSE2NEON_PRECISE_DIV
2230. // Additional Newton-Raphson iteration for accuracy
  2231. recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
  2232. #endif
  2233. return vreinterpretq_m128_f32(recip);
  2234. }
  2235. // Compute the approximate reciprocal of the lower single-precision (32-bit)
  2236. // floating-point element in a, store the result in the lower element of dst,
  2237. // and copy the upper 3 packed elements from a to the upper elements of dst. The
  2238. // maximum relative error for this approximation is less than 1.5*2^-12.
  2239. //
  2240. // dst[31:0] := (1.0 / a[31:0])
  2241. // dst[127:32] := a[127:32]
  2242. //
  2243. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ss
  2244. FORCE_INLINE __m128 _mm_rcp_ss(__m128 a)
  2245. {
  2246. return _mm_move_ss(a, _mm_rcp_ps(a));
  2247. }
  2248. // Computes the approximations of the reciprocal square roots of the four
  2249. // single-precision floating point values of in.
  2250. // The current precision is 1% error.
  2251. // https://msdn.microsoft.com/en-us/library/22hfsh53(v=vs.100).aspx
  2252. FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
  2253. {
  2254. float32x4_t out = vrsqrteq_f32(vreinterpretq_f32_m128(in));
  2255. #if SSE2NEON_PRECISE_SQRT
2256. // Additional Newton-Raphson iteration for accuracy
  2257. out = vmulq_f32(
  2258. out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
  2259. out = vmulq_f32(
  2260. out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
  2261. #endif
  2262. return vreinterpretq_m128_f32(out);
  2263. }
  2264. // Compute the approximate reciprocal square root of the lower single-precision
  2265. // (32-bit) floating-point element in a, store the result in the lower element
  2266. // of dst, and copy the upper 3 packed elements from a to the upper elements of
  2267. // dst.
  2268. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rsqrt_ss
  2269. FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in)
  2270. {
  2271. return vsetq_lane_f32(vgetq_lane_f32(_mm_rsqrt_ps(in), 0), in, 0);
  2272. }
  2273. // Compute the absolute differences of packed unsigned 8-bit integers in a and
  2274. // b, then horizontally sum each consecutive 8 differences to produce four
  2275. // unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
  2276. // 16 bits of dst.
  2277. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sad_pu8
  2278. FORCE_INLINE __m64 _mm_sad_pu8(__m64 a, __m64 b)
  2279. {
  2280. uint64x1_t t = vpaddl_u32(vpaddl_u16(
  2281. vpaddl_u8(vabd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)))));
  2282. return vreinterpret_m64_u16(
  2283. vset_lane_u16(vget_lane_u64(t, 0), vdup_n_u16(0), 0));
  2284. }
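// Usage sketch (illustrative addition; names are hypothetical, and it assumes
// <string.h> and <stdint.h>): sum of absolute differences of two 8-byte blocks,
// read back through _mm_extract_pi16.
//
//   uint8_t blk_a[8] = {10, 20, 30, 40, 50, 60, 70, 80};
//   uint8_t blk_b[8] = {12, 18, 30, 44, 50, 60, 70, 80};
//   __m64 a, b;
//   memcpy(&a, blk_a, 8);
//   memcpy(&b, blk_b, 8);
//   int sad = _mm_extract_pi16(_mm_sad_pu8(a, b), 0);   // 2 + 2 + 0 + 4 = 8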
  2285. // Macro: Set the flush zero bits of the MXCSR control and status register to
  2286. // the value in unsigned 32-bit integer a. The flush zero may contain any of the
  2287. // following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF
  2288. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_FLUSH_ZERO_MODE
  2289. FORCE_INLINE void _sse2neon_mm_set_flush_zero_mode(unsigned int flag)
  2290. {
  2291. // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
  2292. // regardless of the value of the FZ bit.
  2293. union {
  2294. fpcr_bitfield field;
  2295. #if defined(__aarch64__)
  2296. uint64_t value;
  2297. #else
  2298. uint32_t value;
  2299. #endif
  2300. } r;
  2301. #if defined(__aarch64__)
  2302. __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */
  2303. #else
  2304. __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
  2305. #endif
  2306. r.field.bit24 = (flag & _MM_FLUSH_ZERO_MASK) == _MM_FLUSH_ZERO_ON;
  2307. #if defined(__aarch64__)
  2308. __asm__ __volatile__("msr FPCR, %0" ::"r"(r)); /* write */
  2309. #else
  2310. __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
  2311. #endif
  2312. }
  2313. // Sets the four single-precision, floating-point values to the four inputs.
  2314. // https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx
  2315. FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
  2316. {
  2317. float ALIGN_STRUCT(16) data[4] = {x, y, z, w};
  2318. return vreinterpretq_m128_f32(vld1q_f32(data));
  2319. }
  2320. // Sets the four single-precision, floating-point values to w.
  2321. // https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
  2322. FORCE_INLINE __m128 _mm_set_ps1(float _w)
  2323. {
  2324. return vreinterpretq_m128_f32(vdupq_n_f32(_w));
  2325. }
  2326. // Macro: Set the rounding mode bits of the MXCSR control and status register to
  2327. // the value in unsigned 32-bit integer a. The rounding mode may contain any of
  2328. // the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP,
  2329. // _MM_ROUND_TOWARD_ZERO
  2330. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_ROUNDING_MODE
  2331. FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding)
  2332. {
  2333. union {
  2334. fpcr_bitfield field;
  2335. #if defined(__aarch64__)
  2336. uint64_t value;
  2337. #else
  2338. uint32_t value;
  2339. #endif
  2340. } r;
  2341. #if defined(__aarch64__)
  2342. __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */
  2343. #else
  2344. __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
  2345. #endif
  2346. switch (rounding) {
  2347. case _MM_ROUND_TOWARD_ZERO:
  2348. r.field.bit22 = 1;
  2349. r.field.bit23 = 1;
  2350. break;
  2351. case _MM_ROUND_DOWN:
  2352. r.field.bit22 = 0;
  2353. r.field.bit23 = 1;
  2354. break;
  2355. case _MM_ROUND_UP:
  2356. r.field.bit22 = 1;
  2357. r.field.bit23 = 0;
  2358. break;
  2359. default: //_MM_ROUND_NEAREST
  2360. r.field.bit22 = 0;
  2361. r.field.bit23 = 0;
  2362. }
  2363. #if defined(__aarch64__)
  2364. __asm__ __volatile__("msr FPCR, %0" ::"r"(r)); /* write */
  2365. #else
  2366. __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
  2367. #endif
  2368. }
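// Usage sketch (illustrative addition; names are hypothetical): temporarily
// switching the rounding mode around a conversion and restoring it afterwards.
//
//   unsigned int saved = _MM_GET_ROUNDING_MODE();
//   _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
//   int t = _mm_cvtss_si32(_mm_set_ss(1.7f));   // truncates toward zero -> 1
//   _MM_SET_ROUNDING_MODE(saved);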
  2369. // Copy single-precision (32-bit) floating-point element a to the lower element
  2370. // of dst, and zero the upper 3 elements.
  2371. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ss
  2372. FORCE_INLINE __m128 _mm_set_ss(float a)
  2373. {
  2374. return vreinterpretq_m128_f32(vsetq_lane_f32(a, vdupq_n_f32(0), 0));
  2375. }
  2376. // Sets the four single-precision, floating-point values to w.
  2377. //
  2378. // r0 := r1 := r2 := r3 := w
  2379. //
  2380. // https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
  2381. FORCE_INLINE __m128 _mm_set1_ps(float _w)
  2382. {
  2383. return vreinterpretq_m128_f32(vdupq_n_f32(_w));
  2384. }
  2385. // FIXME: _mm_setcsr() implementation supports changing the rounding mode only.
  2386. FORCE_INLINE void _mm_setcsr(unsigned int a)
  2387. {
  2388. _MM_SET_ROUNDING_MODE(a);
  2389. }
  2390. // FIXME: _mm_getcsr() implementation supports reading the rounding mode only.
  2391. FORCE_INLINE unsigned int _mm_getcsr()
  2392. {
  2393. return _MM_GET_ROUNDING_MODE();
  2394. }
  2395. // Sets the four single-precision, floating-point values to the four inputs in
  2396. // reverse order.
  2397. // https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx
  2398. FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
  2399. {
  2400. float ALIGN_STRUCT(16) data[4] = {w, z, y, x};
  2401. return vreinterpretq_m128_f32(vld1q_f32(data));
  2402. }
  2403. // Clears the four single-precision, floating-point values.
  2404. // https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx
  2405. FORCE_INLINE __m128 _mm_setzero_ps(void)
  2406. {
  2407. return vreinterpretq_m128_f32(vdupq_n_f32(0));
  2408. }
  2409. // Shuffle 16-bit integers in a using the control in imm8, and store the results
  2410. // in dst.
  2411. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pi16
  2412. #ifdef _sse2neon_shuffle
  2413. #define _mm_shuffle_pi16(a, imm) \
  2414. __extension__({ \
  2415. vreinterpret_m64_s16(vshuffle_s16( \
  2416. vreinterpret_s16_m64(a), vreinterpret_s16_m64(a), (imm & 0x3), \
  2417. ((imm >> 2) & 0x3), ((imm >> 4) & 0x3), ((imm >> 6) & 0x3))); \
  2418. })
  2419. #else
  2420. #define _mm_shuffle_pi16(a, imm) \
  2421. __extension__({ \
  2422. int16x4_t ret; \
  2423. ret = \
  2424. vmov_n_s16(vget_lane_s16(vreinterpret_s16_m64(a), (imm) & (0x3))); \
  2425. ret = vset_lane_s16( \
  2426. vget_lane_s16(vreinterpret_s16_m64(a), ((imm) >> 2) & 0x3), ret, \
  2427. 1); \
  2428. ret = vset_lane_s16( \
  2429. vget_lane_s16(vreinterpret_s16_m64(a), ((imm) >> 4) & 0x3), ret, \
  2430. 2); \
  2431. ret = vset_lane_s16( \
  2432. vget_lane_s16(vreinterpret_s16_m64(a), ((imm) >> 6) & 0x3), ret, \
  2433. 3); \
  2434. vreinterpret_m64_s16(ret); \
  2435. })
  2436. #endif
  2437. // Perform a serializing operation on all store-to-memory instructions that were
2438. // issued prior to this instruction. Guarantees that every store instruction
2439. // that precedes the fence, in program order, is globally visible before any
2440. // store instruction which follows the fence in program order.
  2441. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sfence
  2442. FORCE_INLINE void _mm_sfence(void)
  2443. {
  2444. _sse2neon_smp_mb();
  2445. }
  2446. // Perform a serializing operation on all load-from-memory and store-to-memory
  2447. // instructions that were issued prior to this instruction. Guarantees that
  2448. // every memory access that precedes, in program order, the memory fence
  2449. // instruction is globally visible before any memory instruction which follows
  2450. // the fence in program order.
  2451. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mfence
  2452. FORCE_INLINE void _mm_mfence(void)
  2453. {
  2454. _sse2neon_smp_mb();
  2455. }
  2456. // Perform a serializing operation on all load-from-memory instructions that
  2457. // were issued prior to this instruction. Guarantees that every load instruction
2458. // that precedes the fence, in program order, is globally visible before any
2459. // load instruction which follows the fence in program order.
  2460. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lfence
  2461. FORCE_INLINE void _mm_lfence(void)
  2462. {
  2463. _sse2neon_smp_mb();
  2464. }
  2465. // FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255)
  2466. // int imm)
  2467. #ifdef _sse2neon_shuffle
  2468. #define _mm_shuffle_ps(a, b, imm) \
  2469. __extension__({ \
  2470. float32x4_t _input1 = vreinterpretq_f32_m128(a); \
  2471. float32x4_t _input2 = vreinterpretq_f32_m128(b); \
  2472. float32x4_t _shuf = \
  2473. vshuffleq_s32(_input1, _input2, (imm) & (0x3), ((imm) >> 2) & 0x3, \
  2474. (((imm) >> 4) & 0x3) + 4, (((imm) >> 6) & 0x3) + 4); \
  2475. vreinterpretq_m128_f32(_shuf); \
  2476. })
  2477. #else // generic
  2478. #define _mm_shuffle_ps(a, b, imm) \
  2479. __extension__({ \
  2480. __m128 ret; \
  2481. switch (imm) { \
  2482. case _MM_SHUFFLE(1, 0, 3, 2): \
  2483. ret = _mm_shuffle_ps_1032((a), (b)); \
  2484. break; \
  2485. case _MM_SHUFFLE(2, 3, 0, 1): \
  2486. ret = _mm_shuffle_ps_2301((a), (b)); \
  2487. break; \
  2488. case _MM_SHUFFLE(0, 3, 2, 1): \
  2489. ret = _mm_shuffle_ps_0321((a), (b)); \
  2490. break; \
  2491. case _MM_SHUFFLE(2, 1, 0, 3): \
  2492. ret = _mm_shuffle_ps_2103((a), (b)); \
  2493. break; \
  2494. case _MM_SHUFFLE(1, 0, 1, 0): \
  2495. ret = _mm_movelh_ps((a), (b)); \
  2496. break; \
  2497. case _MM_SHUFFLE(1, 0, 0, 1): \
  2498. ret = _mm_shuffle_ps_1001((a), (b)); \
  2499. break; \
  2500. case _MM_SHUFFLE(0, 1, 0, 1): \
  2501. ret = _mm_shuffle_ps_0101((a), (b)); \
  2502. break; \
  2503. case _MM_SHUFFLE(3, 2, 1, 0): \
  2504. ret = _mm_shuffle_ps_3210((a), (b)); \
  2505. break; \
  2506. case _MM_SHUFFLE(0, 0, 1, 1): \
  2507. ret = _mm_shuffle_ps_0011((a), (b)); \
  2508. break; \
  2509. case _MM_SHUFFLE(0, 0, 2, 2): \
  2510. ret = _mm_shuffle_ps_0022((a), (b)); \
  2511. break; \
  2512. case _MM_SHUFFLE(2, 2, 0, 0): \
  2513. ret = _mm_shuffle_ps_2200((a), (b)); \
  2514. break; \
  2515. case _MM_SHUFFLE(3, 2, 0, 2): \
  2516. ret = _mm_shuffle_ps_3202((a), (b)); \
  2517. break; \
  2518. case _MM_SHUFFLE(3, 2, 3, 2): \
  2519. ret = _mm_movehl_ps((b), (a)); \
  2520. break; \
  2521. case _MM_SHUFFLE(1, 1, 3, 3): \
  2522. ret = _mm_shuffle_ps_1133((a), (b)); \
  2523. break; \
  2524. case _MM_SHUFFLE(2, 0, 1, 0): \
  2525. ret = _mm_shuffle_ps_2010((a), (b)); \
  2526. break; \
  2527. case _MM_SHUFFLE(2, 0, 0, 1): \
  2528. ret = _mm_shuffle_ps_2001((a), (b)); \
  2529. break; \
  2530. case _MM_SHUFFLE(2, 0, 3, 2): \
  2531. ret = _mm_shuffle_ps_2032((a), (b)); \
  2532. break; \
  2533. default: \
  2534. ret = _mm_shuffle_ps_default((a), (b), (imm)); \
  2535. break; \
  2536. } \
  2537. ret; \
  2538. })
  2539. #endif
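// Usage sketch (illustrative addition; names are hypothetical): selecting two
// lanes from a and two from b with an _MM_SHUFFLE selector.
//
//   __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
//   __m128 b = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);
//   __m128 r = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
//   // r holds {a0, a1, b2, b3} = {1.0f, 2.0f, 7.0f, 8.0f}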
2540. // Computes the approximations of the square roots of the four single-precision,
2541. // floating-point values of a, using a native square-root instruction on AArch64
2542. // or reciprocal square-root estimates otherwise (see SSE2NEON_PRECISE_SQRT).
  2543. //
  2544. // r0 := sqrt(a0)
  2545. // r1 := sqrt(a1)
  2546. // r2 := sqrt(a2)
  2547. // r3 := sqrt(a3)
  2548. //
  2549. // https://msdn.microsoft.com/en-us/library/vstudio/8z67bwwk(v=vs.100).aspx
  2550. FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
  2551. {
  2552. #if SSE2NEON_PRECISE_SQRT
  2553. float32x4_t recip = vrsqrteq_f32(vreinterpretq_f32_m128(in));
  2554. // Test for vrsqrteq_f32(0) -> positive infinity case.
  2555. // Change to zero, so that s * 1/sqrt(s) result is zero too.
  2556. const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
  2557. const uint32x4_t div_by_zero =
  2558. vceqq_u32(pos_inf, vreinterpretq_u32_f32(recip));
  2559. recip = vreinterpretq_f32_u32(
  2560. vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(recip)));
2561. // Additional Newton-Raphson iteration for accuracy
  2562. recip = vmulq_f32(
  2563. vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
  2564. recip);
  2565. recip = vmulq_f32(
  2566. vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
  2567. recip);
  2568. // sqrt(s) = s * 1/sqrt(s)
  2569. return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(in), recip));
  2570. #elif defined(__aarch64__)
  2571. return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in)));
  2572. #else
  2573. float32x4_t recipsq = vrsqrteq_f32(vreinterpretq_f32_m128(in));
  2574. float32x4_t sq = vrecpeq_f32(recipsq);
  2575. return vreinterpretq_m128_f32(sq);
  2576. #endif
  2577. }
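// Usage sketch (illustrative addition; names are hypothetical): square roots of
// four floats. The result is exact when the AArch64 vsqrtq_f32 path is taken,
// otherwise an estimate (refined when SSE2NEON_PRECISE_SQRT is enabled).
//
//   float v[4] = {1.0f, 4.0f, 9.0f, 16.0f}, r[4];
//   _mm_storeu_ps(r, _mm_sqrt_ps(_mm_loadu_ps(v)));
//   // r is approximately {1.0f, 2.0f, 3.0f, 4.0f}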
  2578. // Computes the approximation of the square root of the scalar single-precision
  2579. // floating point value of in.
  2580. // https://msdn.microsoft.com/en-us/library/ahfsc22d(v=vs.100).aspx
  2581. FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
  2582. {
  2583. float32_t value =
  2584. vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0);
  2585. return vreinterpretq_m128_f32(
  2586. vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0));
  2587. }
  2588. // Stores four single-precision, floating-point values.
  2589. // https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx
  2590. FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
  2591. {
  2592. vst1q_f32(p, vreinterpretq_f32_m128(a));
  2593. }
  2594. // Store the lower single-precision (32-bit) floating-point element from a into
  2595. // 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
  2596. // boundary or a general-protection exception may be generated.
  2597. //
  2598. // MEM[mem_addr+31:mem_addr] := a[31:0]
  2599. // MEM[mem_addr+63:mem_addr+32] := a[31:0]
  2600. // MEM[mem_addr+95:mem_addr+64] := a[31:0]
  2601. // MEM[mem_addr+127:mem_addr+96] := a[31:0]
  2602. //
  2603. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ps1
  2604. FORCE_INLINE void _mm_store_ps1(float *p, __m128 a)
  2605. {
  2606. float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  2607. vst1q_f32(p, vdupq_n_f32(a0));
  2608. }
2609. // Stores the lower single-precision, floating-point value.
  2610. // https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx
  2611. FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
  2612. {
  2613. vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0);
  2614. }
  2615. // Store the lower single-precision (32-bit) floating-point element from a into
  2616. // 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
  2617. // boundary or a general-protection exception may be generated.
  2618. //
  2619. // MEM[mem_addr+31:mem_addr] := a[31:0]
  2620. // MEM[mem_addr+63:mem_addr+32] := a[31:0]
  2621. // MEM[mem_addr+95:mem_addr+64] := a[31:0]
  2622. // MEM[mem_addr+127:mem_addr+96] := a[31:0]
  2623. //
  2624. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store1_ps
  2625. #define _mm_store1_ps _mm_store_ps1
  2626. // Stores the upper two single-precision, floating-point values of a to the
  2627. // address p.
  2628. //
  2629. // *p0 := a2
  2630. // *p1 := a3
  2631. //
  2632. // https://msdn.microsoft.com/en-us/library/a7525fs8(v%3dvs.90).aspx
  2633. FORCE_INLINE void _mm_storeh_pi(__m64 *p, __m128 a)
  2634. {
  2635. *p = vreinterpret_m64_f32(vget_high_f32(a));
  2636. }
  2637. // Stores the lower two single-precision floating point values of a to the
  2638. // address p.
  2639. //
  2640. // *p0 := a0
  2641. // *p1 := a1
  2642. //
  2643. // https://msdn.microsoft.com/en-us/library/h54t98ks(v=vs.90).aspx
  2644. FORCE_INLINE void _mm_storel_pi(__m64 *p, __m128 a)
  2645. {
  2646. *p = vreinterpret_m64_f32(vget_low_f32(a));
  2647. }
  2648. // Store 4 single-precision (32-bit) floating-point elements from a into memory
  2649. // in reverse order. mem_addr must be aligned on a 16-byte boundary or a
  2650. // general-protection exception may be generated.
  2651. //
  2652. // MEM[mem_addr+31:mem_addr] := a[127:96]
  2653. // MEM[mem_addr+63:mem_addr+32] := a[95:64]
  2654. // MEM[mem_addr+95:mem_addr+64] := a[63:32]
  2655. // MEM[mem_addr+127:mem_addr+96] := a[31:0]
  2656. //
  2657. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_ps
  2658. FORCE_INLINE void _mm_storer_ps(float *p, __m128 a)
  2659. {
  2660. float32x4_t tmp = vrev64q_f32(vreinterpretq_f32_m128(a));
  2661. float32x4_t rev = vextq_f32(tmp, tmp, 2);
  2662. vst1q_f32(p, rev);
  2663. }
  2664. // Stores four single-precision, floating-point values.
  2665. // https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx
  2666. FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
  2667. {
  2668. vst1q_f32(p, vreinterpretq_f32_m128(a));
  2669. }
  2670. // Stores 16-bits of integer data a at the address p.
  2671. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si16
  2672. FORCE_INLINE void _mm_storeu_si16(void *p, __m128i a)
  2673. {
  2674. vst1q_lane_s16((int16_t *) p, vreinterpretq_s16_m128i(a), 0);
  2675. }
  2676. // Stores 64-bits of integer data a at the address p.
  2677. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si64
  2678. FORCE_INLINE void _mm_storeu_si64(void *p, __m128i a)
  2679. {
  2680. vst1q_lane_s64((int64_t *) p, vreinterpretq_s64_m128i(a), 0);
  2681. }
  2682. // Store 64-bits of integer data from a into memory using a non-temporal memory
  2683. // hint.
  2684. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pi
  2685. FORCE_INLINE void _mm_stream_pi(__m64 *p, __m64 a)
  2686. {
  2687. vst1_s64((int64_t *) p, vreinterpret_s64_m64(a));
  2688. }
  2689. // Store 128-bits (composed of 4 packed single-precision (32-bit) floating-
  2690. // point elements) from a into memory using a non-temporal memory hint.
  2691. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_ps
  2692. FORCE_INLINE void _mm_stream_ps(float *p, __m128 a)
  2693. {
  2694. #if __has_builtin(__builtin_nontemporal_store)
2695. __builtin_nontemporal_store(vreinterpretq_f32_m128(a), (float32x4_t *) p);
  2696. #else
  2697. vst1q_f32(p, vreinterpretq_f32_m128(a));
  2698. #endif
  2699. }
  2700. // Subtracts the four single-precision, floating-point values of a and b.
  2701. //
  2702. // r0 := a0 - b0
  2703. // r1 := a1 - b1
  2704. // r2 := a2 - b2
  2705. // r3 := a3 - b3
  2706. //
  2707. // https://msdn.microsoft.com/en-us/library/vstudio/1zad2k61(v=vs.100).aspx
  2708. FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
  2709. {
  2710. return vreinterpretq_m128_f32(
  2711. vsubq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  2712. }
  2713. // Subtract the lower single-precision (32-bit) floating-point element in b from
  2714. // the lower single-precision (32-bit) floating-point element in a, store the
  2715. // result in the lower element of dst, and copy the upper 3 packed elements from
  2716. // a to the upper elements of dst.
  2717. //
  2718. // dst[31:0] := a[31:0] - b[31:0]
  2719. // dst[127:32] := a[127:32]
  2720. //
  2721. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_ss
  2722. FORCE_INLINE __m128 _mm_sub_ss(__m128 a, __m128 b)
  2723. {
  2724. return _mm_move_ss(a, _mm_sub_ps(a, b));
  2725. }
  2726. // Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision
  2727. // (32-bit) floating-point elements in row0, row1, row2, and row3, and store the
  2728. // transposed matrix in these vectors (row0 now contains column 0, etc.).
  2729. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=MM_TRANSPOSE4_PS
  2730. #define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  2731. do { \
  2732. float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
  2733. float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
  2734. row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
  2735. vget_low_f32(ROW23.val[0])); \
  2736. row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
  2737. vget_low_f32(ROW23.val[1])); \
  2738. row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
  2739. vget_high_f32(ROW23.val[0])); \
  2740. row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
  2741. vget_high_f32(ROW23.val[1])); \
  2742. } while (0)
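// Usage sketch (illustrative addition; names are hypothetical): transposing a
// 4x4 matrix held in four row vectors in place.
//
//   __m128 r0 = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
//   __m128 r1 = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);
//   __m128 r2 = _mm_setr_ps(9.0f, 10.0f, 11.0f, 12.0f);
//   __m128 r3 = _mm_setr_ps(13.0f, 14.0f, 15.0f, 16.0f);
//   _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
//   // r0 now holds the first column {1.0f, 5.0f, 9.0f, 13.0f}, and so on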
  2743. // according to the documentation, these intrinsics behave the same as the
  2744. // non-'u' versions. We'll just alias them here.
  2745. #define _mm_ucomieq_ss _mm_comieq_ss
  2746. #define _mm_ucomige_ss _mm_comige_ss
  2747. #define _mm_ucomigt_ss _mm_comigt_ss
  2748. #define _mm_ucomile_ss _mm_comile_ss
  2749. #define _mm_ucomilt_ss _mm_comilt_ss
  2750. #define _mm_ucomineq_ss _mm_comineq_ss
  2751. // Return vector of type __m128i with undefined elements.
  2752. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_undefined_si128
  2753. FORCE_INLINE __m128i _mm_undefined_si128(void)
  2754. {
  2755. #if defined(__GNUC__) || defined(__clang__)
  2756. #pragma GCC diagnostic push
  2757. #pragma GCC diagnostic ignored "-Wuninitialized"
  2758. #endif
  2759. __m128i a;
  2760. return a;
  2761. #if defined(__GNUC__) || defined(__clang__)
  2762. #pragma GCC diagnostic pop
  2763. #endif
  2764. }
  2765. // Return vector of type __m128 with undefined elements.
  2766. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_ps
  2767. FORCE_INLINE __m128 _mm_undefined_ps(void)
  2768. {
  2769. #if defined(__GNUC__) || defined(__clang__)
  2770. #pragma GCC diagnostic push
  2771. #pragma GCC diagnostic ignored "-Wuninitialized"
  2772. #endif
  2773. __m128 a;
  2774. return a;
  2775. #if defined(__GNUC__) || defined(__clang__)
  2776. #pragma GCC diagnostic pop
  2777. #endif
  2778. }
  2779. // Selects and interleaves the upper two single-precision, floating-point values
  2780. // from a and b.
  2781. //
  2782. // r0 := a2
  2783. // r1 := b2
  2784. // r2 := a3
  2785. // r3 := b3
  2786. //
  2787. // https://msdn.microsoft.com/en-us/library/skccxx7d%28v=vs.90%29.aspx
  2788. FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b)
  2789. {
  2790. #if defined(__aarch64__)
  2791. return vreinterpretq_m128_f32(
  2792. vzip2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  2793. #else
  2794. float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a));
  2795. float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b));
  2796. float32x2x2_t result = vzip_f32(a1, b1);
  2797. return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
  2798. #endif
  2799. }
  2800. // Selects and interleaves the lower two single-precision, floating-point values
  2801. // from a and b.
  2802. //
  2803. // r0 := a0
  2804. // r1 := b0
  2805. // r2 := a1
  2806. // r3 := b1
  2807. //
  2808. // https://msdn.microsoft.com/en-us/library/25st103b%28v=vs.90%29.aspx
  2809. FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b)
  2810. {
  2811. #if defined(__aarch64__)
  2812. return vreinterpretq_m128_f32(
  2813. vzip1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  2814. #else
  2815. float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a));
  2816. float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b));
  2817. float32x2x2_t result = vzip_f32(a1, b1);
  2818. return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
  2819. #endif
  2820. }
  2821. // Computes bitwise EXOR (exclusive-or) of the four single-precision,
  2822. // floating-point values of a and b.
  2823. // https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx
  2824. FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
  2825. {
  2826. return vreinterpretq_m128_s32(
  2827. veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
  2828. }
  2829. /* SSE2 */
  2830. // Adds the 8 signed or unsigned 16-bit integers in a to the 8 signed or
  2831. // unsigned 16-bit integers in b.
  2832. // https://msdn.microsoft.com/en-us/library/fceha5k4(v=vs.100).aspx
  2833. FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
  2834. {
  2835. return vreinterpretq_m128i_s16(
  2836. vaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  2837. }
  2838. // Adds the 4 signed or unsigned 32-bit integers in a to the 4 signed or
  2839. // unsigned 32-bit integers in b.
  2840. //
  2841. // r0 := a0 + b0
  2842. // r1 := a1 + b1
  2843. // r2 := a2 + b2
  2844. // r3 := a3 + b3
  2845. //
  2846. // https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
  2847. FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
  2848. {
  2849. return vreinterpretq_m128i_s32(
  2850. vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  2851. }
2852. // Adds the 2 signed or unsigned 64-bit integers in a to the 2 signed or
2853. // unsigned 64-bit integers in b.
  2854. // https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
  2855. FORCE_INLINE __m128i _mm_add_epi64(__m128i a, __m128i b)
  2856. {
  2857. return vreinterpretq_m128i_s64(
  2858. vaddq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
  2859. }
  2860. // Adds the 16 signed or unsigned 8-bit integers in a to the 16 signed or
  2861. // unsigned 8-bit integers in b.
  2862. // https://technet.microsoft.com/en-us/subscriptions/yc7tcyzs(v=vs.90)
  2863. FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b)
  2864. {
  2865. return vreinterpretq_m128i_s8(
  2866. vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  2867. }
  2868. // Add packed double-precision (64-bit) floating-point elements in a and b, and
  2869. // store the results in dst.
  2870. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_pd
  2871. FORCE_INLINE __m128d _mm_add_pd(__m128d a, __m128d b)
  2872. {
  2873. #if defined(__aarch64__)
  2874. return vreinterpretq_m128d_f64(
  2875. vaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  2876. #else
  2877. double *da = (double *) &a;
  2878. double *db = (double *) &b;
  2879. double c[2];
  2880. c[0] = da[0] + db[0];
  2881. c[1] = da[1] + db[1];
  2882. return vld1q_f32((float32_t *) c);
  2883. #endif
  2884. }
  2885. // Add the lower double-precision (64-bit) floating-point element in a and b,
  2886. // store the result in the lower element of dst, and copy the upper element from
  2887. // a to the upper element of dst.
  2888. //
  2889. // dst[63:0] := a[63:0] + b[63:0]
  2890. // dst[127:64] := a[127:64]
  2891. //
  2892. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_sd
  2893. FORCE_INLINE __m128d _mm_add_sd(__m128d a, __m128d b)
  2894. {
  2895. #if defined(__aarch64__)
  2896. return _mm_move_sd(a, _mm_add_pd(a, b));
  2897. #else
  2898. double *da = (double *) &a;
  2899. double *db = (double *) &b;
  2900. double c[2];
  2901. c[0] = da[0] + db[0];
  2902. c[1] = da[1];
  2903. return vld1q_f32((float32_t *) c);
  2904. #endif
  2905. }
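// Illustrative sketch (editorial addition): the packed/scalar distinction that
// recurs throughout this file. _mm_add_pd operates on both lanes, while
// _mm_add_sd only touches the low lane and copies the high lane from a.
// _mm_set_pd (defined elsewhere in this file) takes the high lane first.
//
//   __m128d a = _mm_set_pd(2.0, 1.0);    // a = {1.0, 2.0}
//   __m128d b = _mm_set_pd(20.0, 10.0);  // b = {10.0, 20.0}
//   __m128d p = _mm_add_pd(a, b);        // {11.0, 22.0}
//   __m128d s = _mm_add_sd(a, b);        // {11.0, 2.0}  (upper lane from a)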
  2906. // Add 64-bit integers a and b, and store the result in dst.
  2907. //
  2908. // dst[63:0] := a[63:0] + b[63:0]
  2909. //
  2910. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_si64
  2911. FORCE_INLINE __m64 _mm_add_si64(__m64 a, __m64 b)
  2912. {
  2913. return vreinterpret_m64_s64(
  2914. vadd_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
  2915. }
  2916. // Adds the 8 signed 16-bit integers in a to the 8 signed 16-bit integers in b
  2917. // and saturates.
  2918. //
  2919. // r0 := SignedSaturate(a0 + b0)
  2920. // r1 := SignedSaturate(a1 + b1)
  2921. // ...
  2922. // r7 := SignedSaturate(a7 + b7)
  2923. //
  2924. // https://msdn.microsoft.com/en-us/library/1a306ef8(v=vs.100).aspx
  2925. FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b)
  2926. {
  2927. return vreinterpretq_m128i_s16(
  2928. vqaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  2929. }
  2930. // Add packed signed 8-bit integers in a and b using saturation, and store the
  2931. // results in dst.
  2932. //
  2933. // FOR j := 0 to 15
  2934. // i := j*8
  2935. // dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
  2936. // ENDFOR
  2937. //
  2938. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epi8
  2939. FORCE_INLINE __m128i _mm_adds_epi8(__m128i a, __m128i b)
  2940. {
  2941. return vreinterpretq_m128i_s8(
  2942. vqaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  2943. }
  2944. // Add packed unsigned 16-bit integers in a and b using saturation, and store
  2945. // the results in dst.
  2946. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epu16
  2947. FORCE_INLINE __m128i _mm_adds_epu16(__m128i a, __m128i b)
  2948. {
  2949. return vreinterpretq_m128i_u16(
  2950. vqaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
  2951. }
  2952. // Adds the 16 unsigned 8-bit integers in a to the 16 unsigned 8-bit integers in
2953. // b and saturates.
  2954. // https://msdn.microsoft.com/en-us/library/9hahyddy(v=vs.100).aspx
  2955. FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b)
  2956. {
  2957. return vreinterpretq_m128i_u8(
  2958. vqaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  2959. }
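// Illustrative sketch (editorial addition): saturating adds clamp instead of
// wrapping. With _mm_set1_epi8 (defined elsewhere in this file):
//
//   __m128i x = _mm_set1_epi8((char) 200);   // every unsigned lane = 200
//   __m128i y = _mm_set1_epi8(100);          // every unsigned lane = 100
//   __m128i s = _mm_adds_epu8(x, y);         // every lane = 255 (clamped)
//   __m128i w = _mm_add_epi8(x, y);          // every lane = 44 (wrapped)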
  2960. // Compute the bitwise AND of packed double-precision (64-bit) floating-point
  2961. // elements in a and b, and store the results in dst.
  2962. //
  2963. // FOR j := 0 to 1
  2964. // i := j*64
  2965. // dst[i+63:i] := a[i+63:i] AND b[i+63:i]
  2966. // ENDFOR
  2967. //
  2968. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_pd
  2969. FORCE_INLINE __m128d _mm_and_pd(__m128d a, __m128d b)
  2970. {
  2971. return vreinterpretq_m128d_s64(
  2972. vandq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
  2973. }
  2974. // Computes the bitwise AND of the 128-bit value in a and the 128-bit value in
  2975. // b.
  2976. //
  2977. // r := a & b
  2978. //
  2979. // https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx
  2980. FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
  2981. {
  2982. return vreinterpretq_m128i_s32(
  2983. vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  2984. }
  2985. // Compute the bitwise NOT of packed double-precision (64-bit) floating-point
  2986. // elements in a and then AND with b, and store the results in dst.
  2987. //
  2988. // FOR j := 0 to 1
  2989. // i := j*64
  2990. // dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
  2991. // ENDFOR
  2992. //
  2993. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_pd
  2994. FORCE_INLINE __m128d _mm_andnot_pd(__m128d a, __m128d b)
  2995. {
  2996. // *NOTE* argument swap
  2997. return vreinterpretq_m128d_s64(
  2998. vbicq_s64(vreinterpretq_s64_m128d(b), vreinterpretq_s64_m128d(a)));
  2999. }
  3000. // Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the
  3001. // 128-bit value in a.
  3002. //
  3003. // r := (~a) & b
  3004. //
  3005. // https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx
  3006. FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
  3007. {
  3008. return vreinterpretq_m128i_s32(
  3009. vbicq_s32(vreinterpretq_s32_m128i(b),
  3010. vreinterpretq_s32_m128i(a))); // *NOTE* argument swap
  3011. }
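// Illustrative sketch (editorial addition): _mm_andnot_si128 computes
// (~a) & b, while NEON's vbicq computes first & ~second, hence the swapped
// arguments above. For example:
//
//   __m128i a = _mm_set1_epi32(0x0000000F);
//   __m128i b = _mm_set1_epi32(0x000000FF);
//   __m128i r = _mm_andnot_si128(a, b);   // every lane = 0x000000F0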
  3012. // Computes the average of the 8 unsigned 16-bit integers in a and the 8
  3013. // unsigned 16-bit integers in b and rounds.
  3014. //
3015. // r0 := (a0 + b0 + 1) >> 1
3016. // r1 := (a1 + b1 + 1) >> 1
3017. // ...
3018. // r7 := (a7 + b7 + 1) >> 1
  3019. //
  3020. // https://msdn.microsoft.com/en-us/library/vstudio/y13ca3c8(v=vs.90).aspx
  3021. FORCE_INLINE __m128i _mm_avg_epu16(__m128i a, __m128i b)
  3022. {
3023. return vreinterpretq_m128i_u16(
3024. vrhaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
  3025. }
  3026. // Computes the average of the 16 unsigned 8-bit integers in a and the 16
  3027. // unsigned 8-bit integers in b and rounds.
  3028. //
3029. // r0 := (a0 + b0 + 1) >> 1
3030. // r1 := (a1 + b1 + 1) >> 1
3031. // ...
3032. // r15 := (a15 + b15 + 1) >> 1
  3033. //
  3034. // https://msdn.microsoft.com/en-us/library/vstudio/8zwh554a(v%3dvs.90).aspx
  3035. FORCE_INLINE __m128i _mm_avg_epu8(__m128i a, __m128i b)
  3036. {
  3037. return vreinterpretq_m128i_u8(
  3038. vrhaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  3039. }
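// Illustrative sketch (editorial addition): the averages round halves upward,
// i.e. r = (a + b + 1) >> 1, so averaging 1 and 2 yields 2 rather than 1:
//
//   __m128i r = _mm_avg_epu8(_mm_set1_epi8(1), _mm_set1_epi8(2)); // lanes = 2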
  3040. // Shift a left by imm8 bytes while shifting in zeros, and store the results in
  3041. // dst.
  3042. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bslli_si128
  3043. #define _mm_bslli_si128(a, imm) _mm_slli_si128(a, imm)
  3044. // Shift a right by imm8 bytes while shifting in zeros, and store the results in
  3045. // dst.
  3046. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bsrli_si128
  3047. #define _mm_bsrli_si128(a, imm) _mm_srli_si128(a, imm)
  3048. // Cast vector of type __m128d to type __m128. This intrinsic is only used for
  3049. // compilation and does not generate any instructions, thus it has zero latency.
  3050. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_ps
  3051. FORCE_INLINE __m128 _mm_castpd_ps(__m128d a)
  3052. {
  3053. return vreinterpretq_m128_s64(vreinterpretq_s64_m128d(a));
  3054. }
  3055. // Cast vector of type __m128d to type __m128i. This intrinsic is only used for
  3056. // compilation and does not generate any instructions, thus it has zero latency.
  3057. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_si128
  3058. FORCE_INLINE __m128i _mm_castpd_si128(__m128d a)
  3059. {
  3060. return vreinterpretq_m128i_s64(vreinterpretq_s64_m128d(a));
  3061. }
  3062. // Cast vector of type __m128 to type __m128d. This intrinsic is only used for
  3063. // compilation and does not generate any instructions, thus it has zero latency.
  3064. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castps_pd
  3065. FORCE_INLINE __m128d _mm_castps_pd(__m128 a)
  3066. {
  3067. return vreinterpretq_m128d_s32(vreinterpretq_s32_m128(a));
  3068. }
  3069. // Applies a type cast to reinterpret four 32-bit floating point values passed
  3070. // in as a 128-bit parameter as packed 32-bit integers.
  3071. // https://msdn.microsoft.com/en-us/library/bb514099.aspx
  3072. FORCE_INLINE __m128i _mm_castps_si128(__m128 a)
  3073. {
  3074. return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a));
  3075. }
  3076. // Cast vector of type __m128i to type __m128d. This intrinsic is only used for
  3077. // compilation and does not generate any instructions, thus it has zero latency.
  3078. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_pd
  3079. FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a)
  3080. {
  3081. #if defined(__aarch64__)
  3082. return vreinterpretq_m128d_f64(vreinterpretq_f64_m128i(a));
  3083. #else
  3084. return vreinterpretq_m128d_f32(vreinterpretq_f32_m128i(a));
  3085. #endif
  3086. }
  3087. // Applies a type cast to reinterpret four 32-bit integers passed in as a
  3088. // 128-bit parameter as packed 32-bit floating point values.
  3089. // https://msdn.microsoft.com/en-us/library/bb514029.aspx
  3090. FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
  3091. {
  3092. return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a));
  3093. }
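// Illustrative sketch (editorial addition): the cast intrinsics reinterpret
// bits, whereas the cvt intrinsics convert values. With helpers defined
// elsewhere in this file:
//
//   __m128i i = _mm_set1_epi32(1);
//   __m128  c = _mm_castsi128_ps(i); // lanes hold about 1.4e-45f (bit pattern 0x00000001)
//   __m128  v = _mm_cvtepi32_ps(i);  // lanes hold 1.0f (numeric conversion)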
  3094. // Invalidate and flush the cache line that contains p from all levels of the
  3095. // cache hierarchy.
  3096. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clflush
  3097. #if defined(__APPLE__)
  3098. #include <libkern/OSCacheControl.h>
  3099. #endif
  3100. FORCE_INLINE void _mm_clflush(void const *p)
  3101. {
  3102. (void) p;
  3103. /* sys_icache_invalidate is supported since macOS 10.5.
  3104. * However, it does not work on non-jailbroken iOS devices, although the
  3105. * compilation is successful.
  3106. */
  3107. #if defined(__APPLE__)
  3108. sys_icache_invalidate((void *) (uintptr_t) p, SSE2NEON_CACHELINE_SIZE);
  3109. #elif defined(__GNUC__) || defined(__clang__)
  3110. uintptr_t ptr = (uintptr_t) p;
  3111. __builtin___clear_cache((char *) ptr,
  3112. (char *) ptr + SSE2NEON_CACHELINE_SIZE);
  3113. #else
  3114. /* FIXME: MSVC support */
  3115. #endif
  3116. }
  3117. // Compares the 8 signed or unsigned 16-bit integers in a and the 8 signed or
  3118. // unsigned 16-bit integers in b for equality.
  3119. // https://msdn.microsoft.com/en-us/library/2ay060te(v=vs.100).aspx
  3120. FORCE_INLINE __m128i _mm_cmpeq_epi16(__m128i a, __m128i b)
  3121. {
  3122. return vreinterpretq_m128i_u16(
  3123. vceqq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  3124. }
  3125. // Compare packed 32-bit integers in a and b for equality, and store the results
3126. // in dst.
  3127. FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
  3128. {
  3129. return vreinterpretq_m128i_u32(
  3130. vceqq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  3131. }
  3132. // Compares the 16 signed or unsigned 8-bit integers in a and the 16 signed or
  3133. // unsigned 8-bit integers in b for equality.
  3134. // https://msdn.microsoft.com/en-us/library/windows/desktop/bz5xk21a(v=vs.90).aspx
  3135. FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b)
  3136. {
  3137. return vreinterpretq_m128i_u8(
  3138. vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  3139. }
  3140. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3141. // for equality, and store the results in dst.
  3142. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_pd
  3143. FORCE_INLINE __m128d _mm_cmpeq_pd(__m128d a, __m128d b)
  3144. {
  3145. #if defined(__aarch64__)
  3146. return vreinterpretq_m128d_u64(
  3147. vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  3148. #else
  3149. // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
  3150. uint32x4_t cmp =
  3151. vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
  3152. uint32x4_t swapped = vrev64q_u32(cmp);
  3153. return vreinterpretq_m128d_u32(vandq_u32(cmp, swapped));
  3154. #endif
  3155. }
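// Editorial note on the ARMv7-A fallback above (illustrative, not from the
// original header): a 64-bit lane of a equals the same lane of b exactly when
// both of its 32-bit halves match. vceqq_u32 yields a per-half mask
// {h0, h1, h2, h3}; vrev64q_u32 swaps the halves within each 64-bit lane,
// giving {h1, h0, h3, h2}; ANDing the two is all-ones in a 64-bit lane only if
// both halves compared equal. For example, if the inputs match in lane 0 but
// differ only in the upper half of lane 1:
//
//   cmp           = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000}
//   swapped       = {0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF}
//   cmp & swapped = {all-ones (lane 0 equal),  all-zeros (lane 1 not equal)}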
  3156. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3157. // b for equality, store the result in the lower element of dst, and copy the
  3158. // upper element from a to the upper element of dst.
  3159. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_sd
  3160. FORCE_INLINE __m128d _mm_cmpeq_sd(__m128d a, __m128d b)
  3161. {
  3162. return _mm_move_sd(a, _mm_cmpeq_pd(a, b));
  3163. }
  3164. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3165. // for greater-than-or-equal, and store the results in dst.
  3166. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_pd
  3167. FORCE_INLINE __m128d _mm_cmpge_pd(__m128d a, __m128d b)
  3168. {
  3169. #if defined(__aarch64__)
  3170. return vreinterpretq_m128d_u64(
  3171. vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  3172. #else
  3173. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3174. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3175. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3176. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3177. uint64_t d[2];
  3178. d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
  3179. d[1] = (*(double *) &a1) >= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
  3180. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3181. #endif
  3182. }
  3183. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3184. // b for greater-than-or-equal, store the result in the lower element of dst,
  3185. // and copy the upper element from a to the upper element of dst.
  3186. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_sd
  3187. FORCE_INLINE __m128d _mm_cmpge_sd(__m128d a, __m128d b)
  3188. {
  3189. #if defined(__aarch64__)
  3190. return _mm_move_sd(a, _mm_cmpge_pd(a, b));
  3191. #else
  3192. // expand "_mm_cmpge_pd()" to reduce unnecessary operations
  3193. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3194. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3195. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3196. uint64_t d[2];
  3197. d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
  3198. d[1] = a1;
  3199. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3200. #endif
  3201. }
  3202. // Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers
  3203. // in b for greater than.
  3204. //
  3205. // r0 := (a0 > b0) ? 0xffff : 0x0
  3206. // r1 := (a1 > b1) ? 0xffff : 0x0
  3207. // ...
  3208. // r7 := (a7 > b7) ? 0xffff : 0x0
  3209. //
  3210. // https://technet.microsoft.com/en-us/library/xd43yfsa(v=vs.100).aspx
  3211. FORCE_INLINE __m128i _mm_cmpgt_epi16(__m128i a, __m128i b)
  3212. {
  3213. return vreinterpretq_m128i_u16(
  3214. vcgtq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  3215. }
  3216. // Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers
  3217. // in b for greater than.
  3218. // https://msdn.microsoft.com/en-us/library/vstudio/1s9f2z0y(v=vs.100).aspx
  3219. FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
  3220. {
  3221. return vreinterpretq_m128i_u32(
  3222. vcgtq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  3223. }
  3224. // Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers
  3225. // in b for greater than.
  3226. //
  3227. // r0 := (a0 > b0) ? 0xff : 0x0
  3228. // r1 := (a1 > b1) ? 0xff : 0x0
  3229. // ...
  3230. // r15 := (a15 > b15) ? 0xff : 0x0
  3231. //
  3232. // https://msdn.microsoft.com/zh-tw/library/wf45zt2b(v=vs.100).aspx
  3233. FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b)
  3234. {
  3235. return vreinterpretq_m128i_u8(
  3236. vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  3237. }
  3238. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3239. // for greater-than, and store the results in dst.
  3240. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_pd
  3241. FORCE_INLINE __m128d _mm_cmpgt_pd(__m128d a, __m128d b)
  3242. {
  3243. #if defined(__aarch64__)
  3244. return vreinterpretq_m128d_u64(
  3245. vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  3246. #else
  3247. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3248. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3249. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3250. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3251. uint64_t d[2];
  3252. d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
  3253. d[1] = (*(double *) &a1) > (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
  3254. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3255. #endif
  3256. }
  3257. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3258. // b for greater-than, store the result in the lower element of dst, and copy
  3259. // the upper element from a to the upper element of dst.
  3260. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_sd
  3261. FORCE_INLINE __m128d _mm_cmpgt_sd(__m128d a, __m128d b)
  3262. {
  3263. #if defined(__aarch64__)
  3264. return _mm_move_sd(a, _mm_cmpgt_pd(a, b));
  3265. #else
3266. // expand "_mm_cmpgt_pd()" to reduce unnecessary operations
  3267. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3268. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3269. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3270. uint64_t d[2];
  3271. d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
  3272. d[1] = a1;
  3273. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3274. #endif
  3275. }
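// Illustrative sketch (editorial addition): the all-ones/all-zeros masks
// produced by the packed compares are typically used for branchless selects.
// Given two __m128d values a and b, and using _mm_and_pd, _mm_andnot_pd and
// _mm_or_pd (the latter defined elsewhere in this file):
//
//   __m128d mask = _mm_cmpgt_pd(a, b);
//   __m128d max  = _mm_or_pd(_mm_and_pd(mask, a), _mm_andnot_pd(mask, b));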
  3276. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3277. // for less-than-or-equal, and store the results in dst.
  3278. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_pd
  3279. FORCE_INLINE __m128d _mm_cmple_pd(__m128d a, __m128d b)
  3280. {
  3281. #if defined(__aarch64__)
  3282. return vreinterpretq_m128d_u64(
  3283. vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  3284. #else
  3285. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3286. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3287. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3288. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3289. uint64_t d[2];
  3290. d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
  3291. d[1] = (*(double *) &a1) <= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
  3292. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3293. #endif
  3294. }
  3295. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3296. // b for less-than-or-equal, store the result in the lower element of dst, and
  3297. // copy the upper element from a to the upper element of dst.
  3298. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_sd
  3299. FORCE_INLINE __m128d _mm_cmple_sd(__m128d a, __m128d b)
  3300. {
  3301. #if defined(__aarch64__)
  3302. return _mm_move_sd(a, _mm_cmple_pd(a, b));
  3303. #else
3304. // expand "_mm_cmple_pd()" to reduce unnecessary operations
  3305. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3306. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3307. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3308. uint64_t d[2];
  3309. d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
  3310. d[1] = a1;
  3311. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3312. #endif
  3313. }
  3314. // Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers
  3315. // in b for less than.
  3316. //
  3317. // r0 := (a0 < b0) ? 0xffff : 0x0
  3318. // r1 := (a1 < b1) ? 0xffff : 0x0
  3319. // ...
  3320. // r7 := (a7 < b7) ? 0xffff : 0x0
  3321. //
  3322. // https://technet.microsoft.com/en-us/library/t863edb2(v=vs.100).aspx
  3323. FORCE_INLINE __m128i _mm_cmplt_epi16(__m128i a, __m128i b)
  3324. {
  3325. return vreinterpretq_m128i_u16(
  3326. vcltq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  3327. }
  3328. // Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers
  3329. // in b for less than.
  3330. // https://msdn.microsoft.com/en-us/library/vstudio/4ak0bf5d(v=vs.100).aspx
  3331. FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b)
  3332. {
  3333. return vreinterpretq_m128i_u32(
  3334. vcltq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  3335. }
  3336. // Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers
3337. // in b for less than.
  3338. // https://msdn.microsoft.com/en-us/library/windows/desktop/9s46csht(v=vs.90).aspx
  3339. FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b)
  3340. {
  3341. return vreinterpretq_m128i_u8(
  3342. vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  3343. }
  3344. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3345. // for less-than, and store the results in dst.
  3346. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_pd
  3347. FORCE_INLINE __m128d _mm_cmplt_pd(__m128d a, __m128d b)
  3348. {
  3349. #if defined(__aarch64__)
  3350. return vreinterpretq_m128d_u64(
  3351. vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  3352. #else
  3353. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3354. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3355. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3356. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3357. uint64_t d[2];
  3358. d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
  3359. d[1] = (*(double *) &a1) < (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
  3360. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3361. #endif
  3362. }
  3363. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3364. // b for less-than, store the result in the lower element of dst, and copy the
  3365. // upper element from a to the upper element of dst.
  3366. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_sd
  3367. FORCE_INLINE __m128d _mm_cmplt_sd(__m128d a, __m128d b)
  3368. {
  3369. #if defined(__aarch64__)
  3370. return _mm_move_sd(a, _mm_cmplt_pd(a, b));
  3371. #else
  3372. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3373. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3374. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3375. uint64_t d[2];
  3376. d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
  3377. d[1] = a1;
  3378. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3379. #endif
  3380. }
  3381. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3382. // for not-equal, and store the results in dst.
  3383. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_pd
  3384. FORCE_INLINE __m128d _mm_cmpneq_pd(__m128d a, __m128d b)
  3385. {
  3386. #if defined(__aarch64__)
  3387. return vreinterpretq_m128d_s32(vmvnq_s32(vreinterpretq_s32_u64(
  3388. vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)))));
  3389. #else
  3390. // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
  3391. uint32x4_t cmp =
  3392. vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
  3393. uint32x4_t swapped = vrev64q_u32(cmp);
  3394. return vreinterpretq_m128d_u32(vmvnq_u32(vandq_u32(cmp, swapped)));
  3395. #endif
  3396. }
  3397. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3398. // b for not-equal, store the result in the lower element of dst, and copy the
  3399. // upper element from a to the upper element of dst.
  3400. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_sd
  3401. FORCE_INLINE __m128d _mm_cmpneq_sd(__m128d a, __m128d b)
  3402. {
  3403. return _mm_move_sd(a, _mm_cmpneq_pd(a, b));
  3404. }
  3405. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3406. // for not-greater-than-or-equal, and store the results in dst.
  3407. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_pd
  3408. FORCE_INLINE __m128d _mm_cmpnge_pd(__m128d a, __m128d b)
  3409. {
  3410. #if defined(__aarch64__)
  3411. return vreinterpretq_m128d_u64(veorq_u64(
  3412. vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
  3413. vdupq_n_u64(UINT64_MAX)));
  3414. #else
  3415. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3416. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3417. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3418. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3419. uint64_t d[2];
  3420. d[0] =
  3421. !((*(double *) &a0) >= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
  3422. d[1] =
  3423. !((*(double *) &a1) >= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
  3424. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3425. #endif
  3426. }
  3427. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3428. // b for not-greater-than-or-equal, store the result in the lower element of
  3429. // dst, and copy the upper element from a to the upper element of dst.
  3430. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_sd
  3431. FORCE_INLINE __m128d _mm_cmpnge_sd(__m128d a, __m128d b)
  3432. {
  3433. return _mm_move_sd(a, _mm_cmpnge_pd(a, b));
  3434. }
  3435. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3436. // for not-greater-than, and store the results in dst.
3437. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_pd
  3438. FORCE_INLINE __m128d _mm_cmpngt_pd(__m128d a, __m128d b)
  3439. {
  3440. #if defined(__aarch64__)
  3441. return vreinterpretq_m128d_u64(veorq_u64(
  3442. vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
  3443. vdupq_n_u64(UINT64_MAX)));
  3444. #else
  3445. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3446. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3447. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3448. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3449. uint64_t d[2];
  3450. d[0] =
  3451. !((*(double *) &a0) > (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
  3452. d[1] =
  3453. !((*(double *) &a1) > (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
  3454. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3455. #endif
  3456. }
  3457. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3458. // b for not-greater-than, store the result in the lower element of dst, and
  3459. // copy the upper element from a to the upper element of dst.
  3460. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_sd
  3461. FORCE_INLINE __m128d _mm_cmpngt_sd(__m128d a, __m128d b)
  3462. {
  3463. return _mm_move_sd(a, _mm_cmpngt_pd(a, b));
  3464. }
  3465. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3466. // for not-less-than-or-equal, and store the results in dst.
  3467. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_pd
  3468. FORCE_INLINE __m128d _mm_cmpnle_pd(__m128d a, __m128d b)
  3469. {
  3470. #if defined(__aarch64__)
  3471. return vreinterpretq_m128d_u64(veorq_u64(
  3472. vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
  3473. vdupq_n_u64(UINT64_MAX)));
  3474. #else
  3475. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3476. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3477. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3478. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3479. uint64_t d[2];
  3480. d[0] =
  3481. !((*(double *) &a0) <= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
  3482. d[1] =
  3483. !((*(double *) &a1) <= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
  3484. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3485. #endif
  3486. }
  3487. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3488. // b for not-less-than-or-equal, store the result in the lower element of dst,
  3489. // and copy the upper element from a to the upper element of dst.
  3490. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_sd
  3491. FORCE_INLINE __m128d _mm_cmpnle_sd(__m128d a, __m128d b)
  3492. {
  3493. return _mm_move_sd(a, _mm_cmpnle_pd(a, b));
  3494. }
  3495. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3496. // for not-less-than, and store the results in dst.
  3497. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_pd
  3498. FORCE_INLINE __m128d _mm_cmpnlt_pd(__m128d a, __m128d b)
  3499. {
  3500. #if defined(__aarch64__)
  3501. return vreinterpretq_m128d_u64(veorq_u64(
  3502. vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
  3503. vdupq_n_u64(UINT64_MAX)));
  3504. #else
  3505. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3506. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3507. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3508. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3509. uint64_t d[2];
  3510. d[0] =
  3511. !((*(double *) &a0) < (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
  3512. d[1] =
  3513. !((*(double *) &a1) < (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
  3514. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3515. #endif
  3516. }
  3517. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3518. // b for not-less-than, store the result in the lower element of dst, and copy
  3519. // the upper element from a to the upper element of dst.
  3520. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_sd
  3521. FORCE_INLINE __m128d _mm_cmpnlt_sd(__m128d a, __m128d b)
  3522. {
  3523. return _mm_move_sd(a, _mm_cmpnlt_pd(a, b));
  3524. }
  3525. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3526. // to see if neither is NaN, and store the results in dst.
  3527. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_pd
  3528. FORCE_INLINE __m128d _mm_cmpord_pd(__m128d a, __m128d b)
  3529. {
  3530. #if defined(__aarch64__)
  3531. // Excluding NaNs, any two floating point numbers can be compared.
  3532. uint64x2_t not_nan_a =
  3533. vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a));
  3534. uint64x2_t not_nan_b =
  3535. vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b));
  3536. return vreinterpretq_m128d_u64(vandq_u64(not_nan_a, not_nan_b));
  3537. #else
  3538. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3539. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3540. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3541. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3542. uint64_t d[2];
  3543. d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
  3544. (*(double *) &b0) == (*(double *) &b0))
  3545. ? ~UINT64_C(0)
  3546. : UINT64_C(0);
  3547. d[1] = ((*(double *) &a1) == (*(double *) &a1) &&
  3548. (*(double *) &b1) == (*(double *) &b1))
  3549. ? ~UINT64_C(0)
  3550. : UINT64_C(0);
  3551. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3552. #endif
  3553. }
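// Illustrative sketch (editorial addition): "ordered" means neither input is
// NaN, and cmpunord is its complement. With NAN from <math.h> and _mm_set_pd
// defined elsewhere in this file:
//
//   __m128d a = _mm_set_pd(NAN, 1.0);    // a = {1.0, NaN}
//   __m128d o = _mm_cmpord_pd(a, a);     // {all-ones, all-zeros}
//   __m128d u = _mm_cmpunord_pd(a, a);   // {all-zeros, all-ones}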
  3554. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3555. // b to see if neither is NaN, store the result in the lower element of dst, and
  3556. // copy the upper element from a to the upper element of dst.
  3557. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_sd
  3558. FORCE_INLINE __m128d _mm_cmpord_sd(__m128d a, __m128d b)
  3559. {
  3560. #if defined(__aarch64__)
  3561. return _mm_move_sd(a, _mm_cmpord_pd(a, b));
  3562. #else
  3563. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3564. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3565. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3566. uint64_t d[2];
  3567. d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
  3568. (*(double *) &b0) == (*(double *) &b0))
  3569. ? ~UINT64_C(0)
  3570. : UINT64_C(0);
  3571. d[1] = a1;
  3572. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3573. #endif
  3574. }
  3575. // Compare packed double-precision (64-bit) floating-point elements in a and b
  3576. // to see if either is NaN, and store the results in dst.
  3577. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_pd
  3578. FORCE_INLINE __m128d _mm_cmpunord_pd(__m128d a, __m128d b)
  3579. {
  3580. #if defined(__aarch64__)
3581. // A NaN never compares equal to anything, including itself, so (x == x)
3581. // is false only for NaN lanes.
  3582. uint64x2_t not_nan_a =
  3583. vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a));
  3584. uint64x2_t not_nan_b =
  3585. vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b));
  3586. return vreinterpretq_m128d_s32(
  3587. vmvnq_s32(vreinterpretq_s32_u64(vandq_u64(not_nan_a, not_nan_b))));
  3588. #else
  3589. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3590. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3591. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3592. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  3593. uint64_t d[2];
  3594. d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
  3595. (*(double *) &b0) == (*(double *) &b0))
  3596. ? UINT64_C(0)
  3597. : ~UINT64_C(0);
  3598. d[1] = ((*(double *) &a1) == (*(double *) &a1) &&
  3599. (*(double *) &b1) == (*(double *) &b1))
  3600. ? UINT64_C(0)
  3601. : ~UINT64_C(0);
  3602. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3603. #endif
  3604. }
  3605. // Compare the lower double-precision (64-bit) floating-point elements in a and
  3606. // b to see if either is NaN, store the result in the lower element of dst, and
  3607. // copy the upper element from a to the upper element of dst.
  3608. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_sd
  3609. FORCE_INLINE __m128d _mm_cmpunord_sd(__m128d a, __m128d b)
  3610. {
  3611. #if defined(__aarch64__)
  3612. return _mm_move_sd(a, _mm_cmpunord_pd(a, b));
  3613. #else
  3614. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3615. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3616. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  3617. uint64_t d[2];
  3618. d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
  3619. (*(double *) &b0) == (*(double *) &b0))
  3620. ? UINT64_C(0)
  3621. : ~UINT64_C(0);
  3622. d[1] = a1;
  3623. return vreinterpretq_m128d_u64(vld1q_u64(d));
  3624. #endif
  3625. }
  3626. // Compare the lower double-precision (64-bit) floating-point element in a and b
  3627. // for greater-than-or-equal, and return the boolean result (0 or 1).
  3628. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_sd
  3629. FORCE_INLINE int _mm_comige_sd(__m128d a, __m128d b)
  3630. {
  3631. #if defined(__aarch64__)
  3632. return vgetq_lane_u64(vcgeq_f64(a, b), 0) & 0x1;
  3633. #else
  3634. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3635. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3636. return (*(double *) &a0 >= *(double *) &b0);
  3637. #endif
  3638. }
  3639. // Compare the lower double-precision (64-bit) floating-point element in a and b
  3640. // for greater-than, and return the boolean result (0 or 1).
  3641. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_sd
  3642. FORCE_INLINE int _mm_comigt_sd(__m128d a, __m128d b)
  3643. {
  3644. #if defined(__aarch64__)
  3645. return vgetq_lane_u64(vcgtq_f64(a, b), 0) & 0x1;
  3646. #else
  3647. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3648. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3649. return (*(double *) &a0 > *(double *) &b0);
  3650. #endif
  3651. }
  3652. // Compare the lower double-precision (64-bit) floating-point element in a and b
  3653. // for less-than-or-equal, and return the boolean result (0 or 1).
  3654. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_sd
  3655. FORCE_INLINE int _mm_comile_sd(__m128d a, __m128d b)
  3656. {
  3657. #if defined(__aarch64__)
  3658. return vgetq_lane_u64(vcleq_f64(a, b), 0) & 0x1;
  3659. #else
  3660. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3661. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3662. return (*(double *) &a0 <= *(double *) &b0);
  3663. #endif
  3664. }
  3665. // Compare the lower double-precision (64-bit) floating-point element in a and b
  3666. // for less-than, and return the boolean result (0 or 1).
  3667. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_sd
  3668. FORCE_INLINE int _mm_comilt_sd(__m128d a, __m128d b)
  3669. {
  3670. #if defined(__aarch64__)
  3671. return vgetq_lane_u64(vcltq_f64(a, b), 0) & 0x1;
  3672. #else
  3673. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  3674. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  3675. return (*(double *) &a0 < *(double *) &b0);
  3676. #endif
  3677. }
  3678. // Compare the lower double-precision (64-bit) floating-point element in a and b
  3679. // for equality, and return the boolean result (0 or 1).
  3680. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_sd
  3681. FORCE_INLINE int _mm_comieq_sd(__m128d a, __m128d b)
  3682. {
  3683. #if defined(__aarch64__)
  3684. return vgetq_lane_u64(vceqq_f64(a, b), 0) & 0x1;
  3685. #else
  3686. uint32x4_t a_not_nan =
  3687. vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(a));
  3688. uint32x4_t b_not_nan =
  3689. vceqq_u32(vreinterpretq_u32_m128d(b), vreinterpretq_u32_m128d(b));
  3690. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  3691. uint32x4_t a_eq_b =
  3692. vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
  3693. uint64x2_t and_results = vandq_u64(vreinterpretq_u64_u32(a_and_b_not_nan),
  3694. vreinterpretq_u64_u32(a_eq_b));
  3695. return vgetq_lane_u64(and_results, 0) & 0x1;
  3696. #endif
  3697. }
  3698. // Compare the lower double-precision (64-bit) floating-point element in a and b
  3699. // for not-equal, and return the boolean result (0 or 1).
  3700. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comineq_sd
  3701. FORCE_INLINE int _mm_comineq_sd(__m128d a, __m128d b)
  3702. {
  3703. return !_mm_comieq_sd(a, b);
  3704. }
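// Illustrative sketch (editorial addition): unlike the _mm_cmp*_sd family,
// which returns per-lane masks, the _mm_comi*_sd family returns a plain int
// (0 or 1) computed from the low lanes only, so it can drive ordinary
// branches:
//
//   if (_mm_comilt_sd(a, b)) {
//       /* the low double of a is strictly less than the low double of b */
//   }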
  3705. // Convert packed signed 32-bit integers in a to packed double-precision
  3706. // (64-bit) floating-point elements, and store the results in dst.
  3707. //
  3708. // FOR j := 0 to 1
  3709. // i := j*32
  3710. // m := j*64
  3711. // dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
  3712. // ENDFOR
  3713. //
  3714. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_pd
  3715. FORCE_INLINE __m128d _mm_cvtepi32_pd(__m128i a)
  3716. {
  3717. #if defined(__aarch64__)
  3718. return vreinterpretq_m128d_f64(
  3719. vcvtq_f64_s64(vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a)))));
  3720. #else
  3721. double a0 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
  3722. double a1 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 1);
  3723. return _mm_set_pd(a1, a0);
  3724. #endif
  3725. }
  3726. // Converts the four signed 32-bit integer values of a to single-precision,
  3727. // floating-point values
  3728. // https://msdn.microsoft.com/en-us/library/vstudio/36bwxcx5(v=vs.100).aspx
  3729. FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a)
  3730. {
  3731. return vreinterpretq_m128_f32(vcvtq_f32_s32(vreinterpretq_s32_m128i(a)));
  3732. }
  3733. // Convert packed double-precision (64-bit) floating-point elements in a to
  3734. // packed 32-bit integers, and store the results in dst.
  3735. //
  3736. // FOR j := 0 to 1
  3737. // i := 32*j
  3738. // k := 64*j
  3739. // dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
  3740. // ENDFOR
  3741. //
  3742. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_epi32
  3743. FORCE_INLINE __m128i _mm_cvtpd_epi32(__m128d a)
  3744. {
  3745. // vrnd32xq_f64 not supported on clang
  3746. #if defined(__ARM_FEATURE_FRINT) && !defined(__clang__)
  3747. float64x2_t rounded = vrnd32xq_f64(vreinterpretq_f64_m128d(a));
  3748. int64x2_t integers = vcvtq_s64_f64(rounded);
  3749. return vreinterpretq_m128i_s32(
  3750. vcombine_s32(vmovn_s64(integers), vdup_n_s32(0)));
  3751. #else
  3752. __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
  3753. double d0 = ((double *) &rnd)[0];
  3754. double d1 = ((double *) &rnd)[1];
  3755. return _mm_set_epi32(0, 0, (int32_t) d1, (int32_t) d0);
  3756. #endif
  3757. }
  3758. // Convert packed double-precision (64-bit) floating-point elements in a to
  3759. // packed 32-bit integers, and store the results in dst.
  3760. //
  3761. // FOR j := 0 to 1
  3762. // i := 32*j
  3763. // k := 64*j
  3764. // dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
  3765. // ENDFOR
  3766. //
  3767. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_pi32
  3768. FORCE_INLINE __m64 _mm_cvtpd_pi32(__m128d a)
  3769. {
  3770. __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
  3771. double d0 = ((double *) &rnd)[0];
  3772. double d1 = ((double *) &rnd)[1];
  3773. int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) d0, (int32_t) d1};
  3774. return vreinterpret_m64_s32(vld1_s32(data));
  3775. }
  3776. // Convert packed double-precision (64-bit) floating-point elements in a to
  3777. // packed single-precision (32-bit) floating-point elements, and store the
  3778. // results in dst.
  3779. //
  3780. // FOR j := 0 to 1
  3781. // i := 32*j
  3782. // k := 64*j
3783. // dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
  3784. // ENDFOR
  3785. // dst[127:64] := 0
  3786. //
  3787. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_ps
  3788. FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a)
  3789. {
  3790. #if defined(__aarch64__)
  3791. float32x2_t tmp = vcvt_f32_f64(vreinterpretq_f64_m128d(a));
  3792. return vreinterpretq_m128_f32(vcombine_f32(tmp, vdup_n_f32(0)));
  3793. #else
  3794. float a0 = (float) ((double *) &a)[0];
  3795. float a1 = (float) ((double *) &a)[1];
  3796. return _mm_set_ps(0, 0, a1, a0);
  3797. #endif
  3798. }
  3799. // Convert packed signed 32-bit integers in a to packed double-precision
  3800. // (64-bit) floating-point elements, and store the results in dst.
  3801. //
  3802. // FOR j := 0 to 1
  3803. // i := j*32
  3804. // m := j*64
  3805. // dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
  3806. // ENDFOR
  3807. //
  3808. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32_pd
  3809. FORCE_INLINE __m128d _mm_cvtpi32_pd(__m64 a)
  3810. {
  3811. #if defined(__aarch64__)
  3812. return vreinterpretq_m128d_f64(
  3813. vcvtq_f64_s64(vmovl_s32(vreinterpret_s32_m64(a))));
  3814. #else
  3815. double a0 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 0);
  3816. double a1 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 1);
  3817. return _mm_set_pd(a1, a0);
  3818. #endif
  3819. }
  3820. // Converts the four single-precision, floating-point values of a to signed
  3821. // 32-bit integer values.
  3822. //
  3823. // r0 := (int) a0
  3824. // r1 := (int) a1
  3825. // r2 := (int) a2
  3826. // r3 := (int) a3
  3827. //
  3828. // https://msdn.microsoft.com/en-us/library/vstudio/xdc42k5e(v=vs.100).aspx
  3829. // *NOTE*. The default rounding mode on SSE is 'round to even', which ARMv7-A
  3830. // does not support! It is supported on ARMv8-A however.
  3831. FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a)
  3832. {
  3833. #if defined(__ARM_FEATURE_FRINT)
  3834. return vreinterpretq_m128i_s32(vcvtq_s32_f32(vrnd32xq_f32(a)));
  3835. #elif defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING)
  3836. switch (_MM_GET_ROUNDING_MODE()) {
  3837. case _MM_ROUND_NEAREST:
  3838. return vreinterpretq_m128i_s32(vcvtnq_s32_f32(a));
  3839. case _MM_ROUND_DOWN:
  3840. return vreinterpretq_m128i_s32(vcvtmq_s32_f32(a));
  3841. case _MM_ROUND_UP:
  3842. return vreinterpretq_m128i_s32(vcvtpq_s32_f32(a));
  3843. default: // _MM_ROUND_TOWARD_ZERO
  3844. return vreinterpretq_m128i_s32(vcvtq_s32_f32(a));
  3845. }
  3846. #else
  3847. float *f = (float *) &a;
  3848. switch (_MM_GET_ROUNDING_MODE()) {
  3849. case _MM_ROUND_NEAREST: {
  3850. uint32x4_t signmask = vdupq_n_u32(0x80000000);
  3851. float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
  3852. vdupq_n_f32(0.5f)); /* +/- 0.5 */
  3853. int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
  3854. vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
  3855. int32x4_t r_trunc = vcvtq_s32_f32(
  3856. vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
  3857. int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
  3858. vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
  3859. int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
  3860. vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
  3861. float32x4_t delta = vsubq_f32(
  3862. vreinterpretq_f32_m128(a),
  3863. vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
  3864. uint32x4_t is_delta_half =
  3865. vceqq_f32(delta, half); /* delta == +/- 0.5 */
  3866. return vreinterpretq_m128i_s32(
  3867. vbslq_s32(is_delta_half, r_even, r_normal));
  3868. }
  3869. case _MM_ROUND_DOWN:
  3870. return _mm_set_epi32(floorf(f[3]), floorf(f[2]), floorf(f[1]),
  3871. floorf(f[0]));
  3872. case _MM_ROUND_UP:
  3873. return _mm_set_epi32(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]),
  3874. ceilf(f[0]));
  3875. default: // _MM_ROUND_TOWARD_ZERO
  3876. return _mm_set_epi32((int32_t) f[3], (int32_t) f[2], (int32_t) f[1],
  3877. (int32_t) f[0]);
  3878. }
  3879. #endif
  3880. }
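// Illustrative sketch (editorial addition): the conversion honours the current
// rounding mode, and the SSE default is round-to-nearest-even, so ties go to
// the even integer. Using _MM_SET_ROUNDING_MODE and _mm_set1_ps, both defined
// elsewhere in this file:
//
//   _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
//   __m128i r0 = _mm_cvtps_epi32(_mm_set1_ps(2.5f));   // lanes = 2 (even)
//   _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
//   __m128i r1 = _mm_cvtps_epi32(_mm_set1_ps(2.5f));   // lanes = 3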
  3881. // Convert packed single-precision (32-bit) floating-point elements in a to
  3882. // packed double-precision (64-bit) floating-point elements, and store the
  3883. // results in dst.
  3884. //
  3885. // FOR j := 0 to 1
  3886. // i := 64*j
  3887. // k := 32*j
  3888. // dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
  3889. // ENDFOR
  3890. //
  3891. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pd
  3892. FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a)
  3893. {
  3894. #if defined(__aarch64__)
  3895. return vreinterpretq_m128d_f64(
  3896. vcvt_f64_f32(vget_low_f32(vreinterpretq_f32_m128(a))));
  3897. #else
  3898. double a0 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  3899. double a1 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
  3900. return _mm_set_pd(a1, a0);
  3901. #endif
  3902. }
  3903. // Copy the lower double-precision (64-bit) floating-point element of a to dst.
  3904. //
  3905. // dst[63:0] := a[63:0]
  3906. //
  3907. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_f64
  3908. FORCE_INLINE double _mm_cvtsd_f64(__m128d a)
  3909. {
  3910. #if defined(__aarch64__)
  3911. return (double) vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0);
  3912. #else
  3913. return ((double *) &a)[0];
  3914. #endif
  3915. }
  3916. // Convert the lower double-precision (64-bit) floating-point element in a to a
  3917. // 32-bit integer, and store the result in dst.
  3918. //
  3919. // dst[31:0] := Convert_FP64_To_Int32(a[63:0])
  3920. //
  3921. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si32
  3922. FORCE_INLINE int32_t _mm_cvtsd_si32(__m128d a)
  3923. {
  3924. #if defined(__aarch64__)
  3925. return (int32_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0);
  3926. #else
  3927. __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
  3928. double ret = ((double *) &rnd)[0];
  3929. return (int32_t) ret;
  3930. #endif
  3931. }
  3932. // Convert the lower double-precision (64-bit) floating-point element in a to a
  3933. // 64-bit integer, and store the result in dst.
  3934. //
  3935. // dst[63:0] := Convert_FP64_To_Int64(a[63:0])
  3936. //
  3937. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64
  3938. FORCE_INLINE int64_t _mm_cvtsd_si64(__m128d a)
  3939. {
  3940. #if defined(__aarch64__)
  3941. return (int64_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0);
  3942. #else
  3943. __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
  3944. double ret = ((double *) &rnd)[0];
  3945. return (int64_t) ret;
  3946. #endif
  3947. }
  3948. // Convert the lower double-precision (64-bit) floating-point element in a to a
  3949. // 64-bit integer, and store the result in dst.
  3950. //
  3951. // dst[63:0] := Convert_FP64_To_Int64(a[63:0])
  3952. //
  3953. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64x
  3954. #define _mm_cvtsd_si64x _mm_cvtsd_si64
  3955. // Convert the lower double-precision (64-bit) floating-point element in b to a
  3956. // single-precision (32-bit) floating-point element, store the result in the
  3957. // lower element of dst, and copy the upper 3 packed elements from a to the
  3958. // upper elements of dst.
  3959. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_ss
  3960. FORCE_INLINE __m128 _mm_cvtsd_ss(__m128 a, __m128d b)
  3961. {
  3962. #if defined(__aarch64__)
  3963. return vreinterpretq_m128_f32(vsetq_lane_f32(
  3964. vget_lane_f32(vcvt_f32_f64(vreinterpretq_f64_m128d(b)), 0),
  3965. vreinterpretq_f32_m128(a), 0));
  3966. #else
  3967. return vreinterpretq_m128_f32(vsetq_lane_f32((float) ((double *) &b)[0],
  3968. vreinterpretq_f32_m128(a), 0));
  3969. #endif
  3970. }
  3971. // Copy the lower 32-bit integer in a to dst.
  3972. //
  3973. // dst[31:0] := a[31:0]
  3974. //
  3975. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si32
  3976. FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
  3977. {
  3978. return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
  3979. }
  3980. // Copy the lower 64-bit integer in a to dst.
  3981. //
  3982. // dst[63:0] := a[63:0]
  3983. //
  3984. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64
  3985. FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a)
  3986. {
  3987. return vgetq_lane_s64(vreinterpretq_s64_m128i(a), 0);
  3988. }
  3989. // Copy the lower 64-bit integer in a to dst.
  3990. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64x
  3991. #define _mm_cvtsi128_si64x(a) _mm_cvtsi128_si64(a)
  3992. // Convert the signed 32-bit integer b to a double-precision (64-bit)
  3993. // floating-point element, store the result in the lower element of dst, and
  3994. // copy the upper element from a to the upper element of dst.
  3995. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_sd
  3996. FORCE_INLINE __m128d _mm_cvtsi32_sd(__m128d a, int32_t b)
  3997. {
  3998. #if defined(__aarch64__)
  3999. return vreinterpretq_m128d_f64(
  4000. vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0));
  4001. #else
  4002. double bf = (double) b;
  4003. return vreinterpretq_m128d_s64(
  4004. vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0));
  4005. #endif
  4006. }
4013. // Moves 32-bit integer a to the least significant 32 bits of an __m128i object,
  4014. // zero extending the upper bits.
  4015. //
  4016. // r0 := a
  4017. // r1 := 0x0
  4018. // r2 := 0x0
  4019. // r3 := 0x0
  4020. //
  4021. // https://msdn.microsoft.com/en-us/library/ct3539ha%28v=vs.90%29.aspx
  4022. FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
  4023. {
  4024. return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0));
  4025. }
  4026. // Convert the signed 64-bit integer b to a double-precision (64-bit)
  4027. // floating-point element, store the result in the lower element of dst, and
  4028. // copy the upper element from a to the upper element of dst.
  4029. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_sd
  4030. FORCE_INLINE __m128d _mm_cvtsi64_sd(__m128d a, int64_t b)
  4031. {
  4032. #if defined(__aarch64__)
  4033. return vreinterpretq_m128d_f64(
  4034. vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0));
  4035. #else
  4036. double bf = (double) b;
  4037. return vreinterpretq_m128d_s64(
  4038. vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0));
  4039. #endif
  4040. }
4041. // Moves 64-bit integer a to the least significant 64 bits of an __m128i
4042. // object, zero extending the upper bits.
  4043. //
  4044. // r0 := a
  4045. // r1 := 0x0
  4046. FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a)
  4047. {
  4048. return vreinterpretq_m128i_s64(vsetq_lane_s64(a, vdupq_n_s64(0), 0));
  4049. }
  4050. // Copy 64-bit integer a to the lower element of dst, and zero the upper
  4051. // element.
  4052. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_si128
  4053. #define _mm_cvtsi64x_si128(a) _mm_cvtsi64_si128(a)
  4054. // Convert the signed 64-bit integer b to a double-precision (64-bit)
  4055. // floating-point element, store the result in the lower element of dst, and
  4056. // copy the upper element from a to the upper element of dst.
  4057. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_sd
  4058. #define _mm_cvtsi64x_sd(a, b) _mm_cvtsi64_sd(a, b)
  4059. // Convert the lower single-precision (32-bit) floating-point element in b to a
  4060. // double-precision (64-bit) floating-point element, store the result in the
  4061. // lower element of dst, and copy the upper element from a to the upper element
  4062. // of dst.
  4063. //
  4064. // dst[63:0] := Convert_FP32_To_FP64(b[31:0])
  4065. // dst[127:64] := a[127:64]
  4066. //
  4067. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_sd
  4068. FORCE_INLINE __m128d _mm_cvtss_sd(__m128d a, __m128 b)
  4069. {
  4070. double d = (double) vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
  4071. #if defined(__aarch64__)
  4072. return vreinterpretq_m128d_f64(
  4073. vsetq_lane_f64(d, vreinterpretq_f64_m128d(a), 0));
  4074. #else
  4075. return vreinterpretq_m128d_s64(
  4076. vsetq_lane_s64(*(int64_t *) &d, vreinterpretq_s64_m128d(a), 0));
  4077. #endif
  4078. }
  4079. // Convert packed double-precision (64-bit) floating-point elements in a to
  4080. // packed 32-bit integers with truncation, and store the results in dst.
  4081. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_epi32
  4082. FORCE_INLINE __m128i _mm_cvttpd_epi32(__m128d a)
  4083. {
  4084. double a0 = ((double *) &a)[0];
  4085. double a1 = ((double *) &a)[1];
  4086. return _mm_set_epi32(0, 0, (int32_t) a1, (int32_t) a0);
  4087. }
  4088. // Convert packed double-precision (64-bit) floating-point elements in a to
  4089. // packed 32-bit integers with truncation, and store the results in dst.
  4090. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_pi32
  4091. FORCE_INLINE __m64 _mm_cvttpd_pi32(__m128d a)
  4092. {
  4093. double a0 = ((double *) &a)[0];
  4094. double a1 = ((double *) &a)[1];
  4095. int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) a0, (int32_t) a1};
  4096. return vreinterpret_m64_s32(vld1_s32(data));
  4097. }
4098. // Converts the four single-precision floating-point values of a to signed
4099. // 32-bit integer values using truncation.
  4100. // https://msdn.microsoft.com/en-us/library/vstudio/1h005y6x(v=vs.100).aspx
  4101. FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a)
  4102. {
  4103. return vreinterpretq_m128i_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)));
  4104. }
  4105. // Convert the lower double-precision (64-bit) floating-point element in a to a
  4106. // 32-bit integer with truncation, and store the result in dst.
  4107. //
4108. // dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0])
  4109. //
  4110. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si32
  4111. FORCE_INLINE int32_t _mm_cvttsd_si32(__m128d a)
  4112. {
  4113. double ret = *((double *) &a);
  4114. return (int32_t) ret;
  4115. }
  4116. // Convert the lower double-precision (64-bit) floating-point element in a to a
  4117. // 64-bit integer with truncation, and store the result in dst.
  4118. //
  4119. // dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
  4120. //
  4121. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64
  4122. FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a)
  4123. {
  4124. #if defined(__aarch64__)
  4125. return vgetq_lane_s64(vcvtq_s64_f64(vreinterpretq_f64_m128d(a)), 0);
  4126. #else
  4127. double ret = *((double *) &a);
  4128. return (int64_t) ret;
  4129. #endif
  4130. }
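// Illustrative usage sketch (not part of the upstream header): the "tt"
// variants truncate toward zero instead of using the current rounding mode.
//
//   int64_t t = _mm_cvttsd_si64(_mm_set_sd(-3.9)); // t == -3 (truncated)
//   int64_t r = _mm_cvtsd_si64(_mm_set_sd(-3.9));  // r == -4 (round to nearest)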
  4131. // Convert the lower double-precision (64-bit) floating-point element in a to a
  4132. // 64-bit integer with truncation, and store the result in dst.
  4133. //
  4134. // dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
  4135. //
  4136. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64x
  4137. #define _mm_cvttsd_si64x(a) _mm_cvttsd_si64(a)
  4138. // Divide packed double-precision (64-bit) floating-point elements in a by
  4139. // packed elements in b, and store the results in dst.
  4140. //
  4141. // FOR j := 0 to 1
  4142. // i := 64*j
  4143. // dst[i+63:i] := a[i+63:i] / b[i+63:i]
  4144. // ENDFOR
  4145. //
  4146. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_pd
  4147. FORCE_INLINE __m128d _mm_div_pd(__m128d a, __m128d b)
  4148. {
  4149. #if defined(__aarch64__)
  4150. return vreinterpretq_m128d_f64(
  4151. vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  4152. #else
  4153. double *da = (double *) &a;
  4154. double *db = (double *) &b;
  4155. double c[2];
  4156. c[0] = da[0] / db[0];
  4157. c[1] = da[1] / db[1];
  4158. return vld1q_f32((float32_t *) c);
  4159. #endif
  4160. }
  4161. // Divide the lower double-precision (64-bit) floating-point element in a by the
  4162. // lower double-precision (64-bit) floating-point element in b, store the result
  4163. // in the lower element of dst, and copy the upper element from a to the upper
  4164. // element of dst.
  4165. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_sd
  4166. FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b)
  4167. {
  4168. #if defined(__aarch64__)
  4169. float64x2_t tmp =
  4170. vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b));
  4171. return vreinterpretq_m128d_f64(
  4172. vsetq_lane_f64(vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1), tmp, 1));
  4173. #else
  4174. return _mm_move_sd(a, _mm_div_pd(a, b));
  4175. #endif
  4176. }
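// Illustrative usage sketch (not part of the upstream header): only the low
// lane is divided; the high lane is carried over from a.
//
//   __m128d a = _mm_set_pd(10.0, 9.0); // lanes 0..1: 9.0, 10.0
//   __m128d b = _mm_set_pd(4.0, 3.0);  // lanes 0..1: 3.0, 4.0
//   __m128d r = _mm_div_sd(a, b);      // lanes 0..1: 3.0 (9/3), 10.0 (from a)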
  4177. // Extracts the selected signed or unsigned 16-bit integer from a and zero
  4178. // extends.
  4179. // https://msdn.microsoft.com/en-us/library/6dceta0c(v=vs.100).aspx
  4180. // FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm)
  4181. #define _mm_extract_epi16(a, imm) \
  4182. vgetq_lane_u16(vreinterpretq_u16_m128i(a), (imm))
  4183. // Inserts the least significant 16 bits of b into the selected 16-bit integer
  4184. // of a.
  4185. // https://msdn.microsoft.com/en-us/library/kaze8hz1%28v=vs.100%29.aspx
  4186. // FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b,
  4187. // __constrange(0,8) int imm)
  4188. #define _mm_insert_epi16(a, b, imm) \
  4189. __extension__({ \
  4190. vreinterpretq_m128i_s16( \
  4191. vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm))); \
  4192. })
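// Illustrative usage sketch (not part of the upstream header): the lane index
// passed to either macro must be a compile-time constant.
//
//   __m128i v = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0);
//   int     e = _mm_extract_epi16(v, 3);    // e == 3 (zero-extended)
//   __m128i w = _mm_insert_epi16(v, 42, 3); // lane 3 becomes 42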
4193. // Loads two double-precision floating-point values from 16-byte aligned
4194. // memory.
  4195. //
  4196. // dst[127:0] := MEM[mem_addr+127:mem_addr]
  4197. //
  4198. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd
  4199. FORCE_INLINE __m128d _mm_load_pd(const double *p)
  4200. {
  4201. #if defined(__aarch64__)
  4202. return vreinterpretq_m128d_f64(vld1q_f64(p));
  4203. #else
  4204. const float *fp = (const float *) p;
  4205. float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], fp[2], fp[3]};
  4206. return vreinterpretq_m128d_f32(vld1q_f32(data));
  4207. #endif
  4208. }
  4209. // Load a double-precision (64-bit) floating-point element from memory into both
  4210. // elements of dst.
  4211. //
  4212. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  4213. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  4214. //
  4215. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd1
  4216. #define _mm_load_pd1 _mm_load1_pd
  4217. // Load a double-precision (64-bit) floating-point element from memory into the
  4218. // lower of dst, and zero the upper element. mem_addr does not need to be
  4219. // aligned on any particular boundary.
  4220. //
  4221. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  4222. // dst[127:64] := 0
  4223. //
  4224. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_sd
  4225. FORCE_INLINE __m128d _mm_load_sd(const double *p)
  4226. {
  4227. #if defined(__aarch64__)
  4228. return vreinterpretq_m128d_f64(vsetq_lane_f64(*p, vdupq_n_f64(0), 0));
  4229. #else
  4230. const float *fp = (const float *) p;
  4231. float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], 0, 0};
  4232. return vreinterpretq_m128d_f32(vld1q_f32(data));
  4233. #endif
  4234. }
4235. // Loads a 128-bit value.
  4236. // https://msdn.microsoft.com/en-us/library/atzzad1h(v=vs.80).aspx
  4237. FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
  4238. {
  4239. return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
  4240. }
  4241. // Load a double-precision (64-bit) floating-point element from memory into both
  4242. // elements of dst.
  4243. //
  4244. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  4245. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  4246. //
  4247. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_pd
  4248. FORCE_INLINE __m128d _mm_load1_pd(const double *p)
  4249. {
  4250. #if defined(__aarch64__)
  4251. return vreinterpretq_m128d_f64(vld1q_dup_f64(p));
  4252. #else
  4253. return vreinterpretq_m128d_s64(vdupq_n_s64(*(const int64_t *) p));
  4254. #endif
  4255. }
  4256. // Load a double-precision (64-bit) floating-point element from memory into the
  4257. // upper element of dst, and copy the lower element from a to dst. mem_addr does
  4258. // not need to be aligned on any particular boundary.
  4259. //
  4260. // dst[63:0] := a[63:0]
  4261. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  4262. //
  4263. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadh_pd
  4264. FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double *p)
  4265. {
  4266. #if defined(__aarch64__)
  4267. return vreinterpretq_m128d_f64(
  4268. vcombine_f64(vget_low_f64(vreinterpretq_f64_m128d(a)), vld1_f64(p)));
  4269. #else
  4270. return vreinterpretq_m128d_f32(vcombine_f32(
  4271. vget_low_f32(vreinterpretq_f32_m128d(a)), vld1_f32((const float *) p)));
  4272. #endif
  4273. }
  4274. // Load 64-bit integer from memory into the first element of dst.
  4275. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_epi64
  4276. FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p)
  4277. {
  4278. /* Load the lower 64 bits of the value pointed to by p into the
  4279. * lower 64 bits of the result, zeroing the upper 64 bits of the result.
  4280. */
  4281. return vreinterpretq_m128i_s32(
  4282. vcombine_s32(vld1_s32((int32_t const *) p), vcreate_s32(0)));
  4283. }
  4284. // Load a double-precision (64-bit) floating-point element from memory into the
  4285. // lower element of dst, and copy the upper element from a to dst. mem_addr does
  4286. // not need to be aligned on any particular boundary.
  4287. //
  4288. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  4289. // dst[127:64] := a[127:64]
  4290. //
  4291. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_pd
  4292. FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p)
  4293. {
  4294. #if defined(__aarch64__)
  4295. return vreinterpretq_m128d_f64(
  4296. vcombine_f64(vld1_f64(p), vget_high_f64(vreinterpretq_f64_m128d(a))));
  4297. #else
  4298. return vreinterpretq_m128d_f32(
  4299. vcombine_f32(vld1_f32((const float *) p),
  4300. vget_high_f32(vreinterpretq_f32_m128d(a))));
  4301. #endif
  4302. }
  4303. // Load 2 double-precision (64-bit) floating-point elements from memory into dst
  4304. // in reverse order. mem_addr must be aligned on a 16-byte boundary or a
  4305. // general-protection exception may be generated.
  4306. //
  4307. // dst[63:0] := MEM[mem_addr+127:mem_addr+64]
  4308. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  4309. //
  4310. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_pd
  4311. FORCE_INLINE __m128d _mm_loadr_pd(const double *p)
  4312. {
  4313. #if defined(__aarch64__)
  4314. float64x2_t v = vld1q_f64(p);
  4315. return vreinterpretq_m128d_f64(vextq_f64(v, v, 1));
  4316. #else
  4317. int64x2_t v = vld1q_s64((const int64_t *) p);
  4318. return vreinterpretq_m128d_s64(vextq_s64(v, v, 1));
  4319. #endif
  4320. }
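// Illustrative usage sketch (not part of the upstream header; "buf" is a
// hypothetical 16-byte aligned array):
//
//   double ALIGN_STRUCT(16) buf[2] = {1.0, 2.0};
//   __m128d r = _mm_loadr_pd(buf); // lanes 0..1: 2.0, 1.0 (reversed)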
4321. // Loads two double-precision floating-point values from unaligned memory.
  4322. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_pd
  4323. FORCE_INLINE __m128d _mm_loadu_pd(const double *p)
  4324. {
  4325. return _mm_load_pd(p);
  4326. }
4327. // Loads a 128-bit value.
  4328. // https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx
  4329. FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p)
  4330. {
  4331. return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
  4332. }
  4333. // Load unaligned 32-bit integer from memory into the first element of dst.
  4334. //
  4335. // dst[31:0] := MEM[mem_addr+31:mem_addr]
  4336. // dst[MAX:32] := 0
  4337. //
  4338. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si32
  4339. FORCE_INLINE __m128i _mm_loadu_si32(const void *p)
  4340. {
  4341. return vreinterpretq_m128i_s32(
  4342. vsetq_lane_s32(*(const int32_t *) p, vdupq_n_s32(0), 0));
  4343. }
  4344. // Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit
  4345. // integers from b.
  4346. //
  4347. // r0 := (a0 * b0) + (a1 * b1)
  4348. // r1 := (a2 * b2) + (a3 * b3)
  4349. // r2 := (a4 * b4) + (a5 * b5)
  4350. // r3 := (a6 * b6) + (a7 * b7)
  4351. // https://msdn.microsoft.com/en-us/library/yht36sa6(v=vs.90).aspx
  4352. FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b)
  4353. {
  4354. int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
  4355. vget_low_s16(vreinterpretq_s16_m128i(b)));
  4356. #if defined(__aarch64__)
  4357. int32x4_t high =
  4358. vmull_high_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b));
  4359. return vreinterpretq_m128i_s32(vpaddq_s32(low, high));
  4360. #else
  4361. int32x4_t high = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
  4362. vget_high_s16(vreinterpretq_s16_m128i(b)));
  4363. int32x2_t low_sum = vpadd_s32(vget_low_s32(low), vget_high_s32(low));
  4364. int32x2_t high_sum = vpadd_s32(vget_low_s32(high), vget_high_s32(high));
  4365. return vreinterpretq_m128i_s32(vcombine_s32(low_sum, high_sum));
  4366. #endif
  4367. }
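// Illustrative usage sketch (not part of the upstream header): each 32-bit
// lane of the result is a two-element dot product of adjacent 16-bit lanes.
//
//   __m128i a = _mm_set1_epi16(2);
//   __m128i b = _mm_set1_epi16(3);
//   __m128i r = _mm_madd_epi16(a, b); // every 32-bit lane == 2*3 + 2*3 == 12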
  4368. // Conditionally store 8-bit integer elements from a into memory using mask
  4369. // (elements are not stored when the highest bit is not set in the corresponding
  4370. // element) and a non-temporal memory hint. mem_addr does not need to be aligned
  4371. // on any particular boundary.
  4372. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmoveu_si128
  4373. FORCE_INLINE void _mm_maskmoveu_si128(__m128i a, __m128i mask, char *mem_addr)
  4374. {
  4375. int8x16_t shr_mask = vshrq_n_s8(vreinterpretq_s8_m128i(mask), 7);
  4376. __m128 b = _mm_load_ps((const float *) mem_addr);
  4377. int8x16_t masked =
  4378. vbslq_s8(vreinterpretq_u8_s8(shr_mask), vreinterpretq_s8_m128i(a),
  4379. vreinterpretq_s8_m128(b));
  4380. vst1q_s8((int8_t *) mem_addr, masked);
  4381. }
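// Illustrative usage sketch (not part of the upstream header; "out" is a
// hypothetical byte buffer): only bytes whose mask byte has its most
// significant bit set are written to memory.
//
//   char out[16] = {0};
//   __m128i data = _mm_set1_epi8(0x55);
//   __m128i mask = _mm_setr_epi8((char) 0x80, 0, 0, 0, 0, 0, 0, 0,
//                                0, 0, 0, 0, 0, 0, 0, 0);
//   _mm_maskmoveu_si128(data, mask, out); // only out[0] becomes 0x55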
  4382. // Computes the pairwise maxima of the 8 signed 16-bit integers from a and the 8
  4383. // signed 16-bit integers from b.
  4384. // https://msdn.microsoft.com/en-us/LIBRary/3x060h7c(v=vs.100).aspx
  4385. FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b)
  4386. {
  4387. return vreinterpretq_m128i_s16(
  4388. vmaxq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  4389. }
  4390. // Computes the pairwise maxima of the 16 unsigned 8-bit integers from a and the
  4391. // 16 unsigned 8-bit integers from b.
  4392. // https://msdn.microsoft.com/en-us/library/st6634za(v=vs.100).aspx
  4393. FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b)
  4394. {
  4395. return vreinterpretq_m128i_u8(
  4396. vmaxq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  4397. }
  4398. // Compare packed double-precision (64-bit) floating-point elements in a and b,
  4399. // and store packed maximum values in dst.
  4400. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pd
  4401. FORCE_INLINE __m128d _mm_max_pd(__m128d a, __m128d b)
  4402. {
  4403. #if defined(__aarch64__)
  4404. #if SSE2NEON_PRECISE_MINMAX
  4405. float64x2_t _a = vreinterpretq_f64_m128d(a);
  4406. float64x2_t _b = vreinterpretq_f64_m128d(b);
  4407. return vreinterpretq_m128d_f64(vbslq_f64(vcgtq_f64(_a, _b), _a, _b));
  4408. #else
  4409. return vreinterpretq_m128d_f64(
  4410. vmaxq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  4411. #endif
  4412. #else
  4413. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  4414. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  4415. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  4416. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  4417. uint64_t d[2];
  4418. d[0] = (*(double *) &a0) > (*(double *) &b0) ? a0 : b0;
  4419. d[1] = (*(double *) &a1) > (*(double *) &b1) ? a1 : b1;
  4420. return vreinterpretq_m128d_u64(vld1q_u64(d));
  4421. #endif
  4422. }
  4423. // Compare the lower double-precision (64-bit) floating-point elements in a and
  4424. // b, store the maximum value in the lower element of dst, and copy the upper
  4425. // element from a to the upper element of dst.
  4426. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_sd
  4427. FORCE_INLINE __m128d _mm_max_sd(__m128d a, __m128d b)
  4428. {
  4429. #if defined(__aarch64__)
  4430. return _mm_move_sd(a, _mm_max_pd(a, b));
  4431. #else
  4432. double *da = (double *) &a;
  4433. double *db = (double *) &b;
  4434. double c[2] = {da[0] > db[0] ? da[0] : db[0], da[1]};
  4435. return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) c));
  4436. #endif
  4437. }
  4438. // Computes the pairwise minima of the 8 signed 16-bit integers from a and the 8
  4439. // signed 16-bit integers from b.
  4440. // https://msdn.microsoft.com/en-us/library/vstudio/6te997ew(v=vs.100).aspx
  4441. FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b)
  4442. {
  4443. return vreinterpretq_m128i_s16(
  4444. vminq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  4445. }
  4446. // Computes the pairwise minima of the 16 unsigned 8-bit integers from a and the
  4447. // 16 unsigned 8-bit integers from b.
  4448. // https://msdn.microsoft.com/ko-kr/library/17k8cf58(v=vs.100).aspxx
  4449. FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b)
  4450. {
  4451. return vreinterpretq_m128i_u8(
  4452. vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  4453. }
  4454. // Compare packed double-precision (64-bit) floating-point elements in a and b,
  4455. // and store packed minimum values in dst.
  4456. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pd
  4457. FORCE_INLINE __m128d _mm_min_pd(__m128d a, __m128d b)
  4458. {
  4459. #if defined(__aarch64__)
  4460. #if SSE2NEON_PRECISE_MINMAX
  4461. float64x2_t _a = vreinterpretq_f64_m128d(a);
  4462. float64x2_t _b = vreinterpretq_f64_m128d(b);
  4463. return vreinterpretq_m128d_f64(vbslq_f64(vcltq_f64(_a, _b), _a, _b));
  4464. #else
  4465. return vreinterpretq_m128d_f64(
  4466. vminq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  4467. #endif
  4468. #else
  4469. uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
  4470. uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
  4471. uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
  4472. uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
  4473. uint64_t d[2];
  4474. d[0] = (*(double *) &a0) < (*(double *) &b0) ? a0 : b0;
  4475. d[1] = (*(double *) &a1) < (*(double *) &b1) ? a1 : b1;
  4476. return vreinterpretq_m128d_u64(vld1q_u64(d));
  4477. #endif
  4478. }
  4479. // Compare the lower double-precision (64-bit) floating-point elements in a and
  4480. // b, store the minimum value in the lower element of dst, and copy the upper
  4481. // element from a to the upper element of dst.
  4482. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_sd
  4483. FORCE_INLINE __m128d _mm_min_sd(__m128d a, __m128d b)
  4484. {
  4485. #if defined(__aarch64__)
  4486. return _mm_move_sd(a, _mm_min_pd(a, b));
  4487. #else
  4488. double *da = (double *) &a;
  4489. double *db = (double *) &b;
  4490. double c[2] = {da[0] < db[0] ? da[0] : db[0], da[1]};
  4491. return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) c));
  4492. #endif
  4493. }
  4494. // Copy the lower 64-bit integer in a to the lower element of dst, and zero the
  4495. // upper element.
  4496. //
  4497. // dst[63:0] := a[63:0]
  4498. // dst[127:64] := 0
  4499. //
  4500. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_epi64
  4501. FORCE_INLINE __m128i _mm_move_epi64(__m128i a)
  4502. {
  4503. return vreinterpretq_m128i_s64(
  4504. vsetq_lane_s64(0, vreinterpretq_s64_m128i(a), 1));
  4505. }
  4506. // Move the lower double-precision (64-bit) floating-point element from b to the
  4507. // lower element of dst, and copy the upper element from a to the upper element
  4508. // of dst.
  4509. //
  4510. // dst[63:0] := b[63:0]
  4511. // dst[127:64] := a[127:64]
  4512. //
  4513. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_sd
  4514. FORCE_INLINE __m128d _mm_move_sd(__m128d a, __m128d b)
  4515. {
  4516. return vreinterpretq_m128d_f32(
  4517. vcombine_f32(vget_low_f32(vreinterpretq_f32_m128d(b)),
  4518. vget_high_f32(vreinterpretq_f32_m128d(a))));
  4519. }
  4520. // NEON does not provide a version of this function.
  4521. // Creates a 16-bit mask from the most significant bits of the 16 signed or
  4522. // unsigned 8-bit integers in a and zero extends the upper bits.
  4523. // https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx
  4524. FORCE_INLINE int _mm_movemask_epi8(__m128i a)
  4525. {
  4526. // Use increasingly wide shifts+adds to collect the sign bits
  4527. // together.
  4528. // Since the widening shifts would be rather confusing to follow in little
  4529. // endian, everything will be illustrated in big endian order instead. This
  4530. // has a different result - the bits would actually be reversed on a big
  4531. // endian machine.
  4532. // Starting input (only half the elements are shown):
  4533. // 89 ff 1d c0 00 10 99 33
  4534. uint8x16_t input = vreinterpretq_u8_m128i(a);
  4535. // Shift out everything but the sign bits with an unsigned shift right.
  4536. //
4537. // Bytes of the vector:
  4538. // 89 ff 1d c0 00 10 99 33
  4539. // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7)
  4540. // | | | | | | | |
  4541. // 01 01 00 01 00 00 01 00
  4542. //
  4543. // Bits of first important lane(s):
  4544. // 10001001 (89)
  4545. // \______
  4546. // |
  4547. // 00000001 (01)
  4548. uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));
  4549. // Merge the even lanes together with a 16-bit unsigned shift right + add.
  4550. // 'xx' represents garbage data which will be ignored in the final result.
  4551. // In the important bytes, the add functions like a binary OR.
  4552. //
  4553. // 01 01 00 01 00 00 01 00
  4554. // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7))
  4555. // \| \| \| \|
  4556. // xx 03 xx 01 xx 00 xx 02
  4557. //
  4558. // 00000001 00000001 (01 01)
  4559. // \_______ |
  4560. // \|
  4561. // xxxxxxxx xxxxxx11 (xx 03)
  4562. uint32x4_t paired16 =
  4563. vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
  4564. // Repeat with a wider 32-bit shift + add.
  4565. // xx 03 xx 01 xx 00 xx 02
  4566. // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >>
  4567. // 14))
  4568. // \| \|
  4569. // xx xx xx 0d xx xx xx 02
  4570. //
  4571. // 00000011 00000001 (03 01)
  4572. // \\_____ ||
  4573. // '----.\||
  4574. // xxxxxxxx xxxx1101 (xx 0d)
  4575. uint64x2_t paired32 =
  4576. vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
  4577. // Last, an even wider 64-bit shift + add to get our result in the low 8 bit
  4578. // lanes. xx xx xx 0d xx xx xx 02
  4579. // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >>
  4580. // 28))
  4581. // \|
  4582. // xx xx xx xx xx xx xx d2
  4583. //
  4584. // 00001101 00000010 (0d 02)
  4585. // \ \___ | |
  4586. // '---. \| |
  4587. // xxxxxxxx 11010010 (xx d2)
  4588. uint8x16_t paired64 =
  4589. vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
  4590. // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts.
  4591. // xx xx xx xx xx xx xx d2
  4592. // || return paired64[0]
  4593. // d2
  4594. // Note: Little endian would return the correct value 4b (01001011) instead.
  4595. return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8);
  4596. }
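// Illustrative usage sketch (not part of the upstream header; "buf" is a
// hypothetical byte buffer): a common pattern is a byte compare followed by a
// scan of the resulting bit mask (__builtin_ctz is a GCC/Clang builtin, shown
// here purely as an example).
//
//   __m128i chunk = _mm_loadu_si128((const __m128i *) buf);
//   __m128i hits  = _mm_cmpeq_epi8(chunk, _mm_set1_epi8('\n'));
//   int mask  = _mm_movemask_epi8(hits);         // one bit per byte of chunk
//   int first = mask ? __builtin_ctz(mask) : -1; // index of first '\n', or -1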
  4597. // Set each bit of mask dst based on the most significant bit of the
  4598. // corresponding packed double-precision (64-bit) floating-point element in a.
  4599. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_pd
  4600. FORCE_INLINE int _mm_movemask_pd(__m128d a)
  4601. {
  4602. uint64x2_t input = vreinterpretq_u64_m128d(a);
  4603. uint64x2_t high_bits = vshrq_n_u64(input, 63);
  4604. return vgetq_lane_u64(high_bits, 0) | (vgetq_lane_u64(high_bits, 1) << 1);
  4605. }
  4606. // Copy the lower 64-bit integer in a to dst.
  4607. //
  4608. // dst[63:0] := a[63:0]
  4609. //
  4610. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movepi64_pi64
  4611. FORCE_INLINE __m64 _mm_movepi64_pi64(__m128i a)
  4612. {
  4613. return vreinterpret_m64_s64(vget_low_s64(vreinterpretq_s64_m128i(a)));
  4614. }
  4615. // Copy the 64-bit integer a to the lower element of dst, and zero the upper
  4616. // element.
  4617. //
  4618. // dst[63:0] := a[63:0]
  4619. // dst[127:64] := 0
  4620. //
  4621. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movpi64_epi64
  4622. FORCE_INLINE __m128i _mm_movpi64_epi64(__m64 a)
  4623. {
  4624. return vreinterpretq_m128i_s64(
  4625. vcombine_s64(vreinterpret_s64_m64(a), vdup_n_s64(0)));
  4626. }
  4627. // Multiply the low unsigned 32-bit integers from each packed 64-bit element in
  4628. // a and b, and store the unsigned 64-bit results in dst.
  4629. //
  4630. // r0 := (a0 & 0xFFFFFFFF) * (b0 & 0xFFFFFFFF)
  4631. // r1 := (a2 & 0xFFFFFFFF) * (b2 & 0xFFFFFFFF)
  4632. FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b)
  4633. {
  4634. // vmull_u32 upcasts instead of masking, so we downcast.
  4635. uint32x2_t a_lo = vmovn_u64(vreinterpretq_u64_m128i(a));
  4636. uint32x2_t b_lo = vmovn_u64(vreinterpretq_u64_m128i(b));
  4637. return vreinterpretq_m128i_u64(vmull_u32(a_lo, b_lo));
  4638. }
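// Illustrative usage sketch (not part of the upstream header): only the even
// (0 and 2) 32-bit lanes take part in the multiplication.
//
//   __m128i a = _mm_set_epi32(0, 3, 0, 2);
//   __m128i b = _mm_set_epi32(0, 5, 0, 4);
//   __m128i r = _mm_mul_epu32(a, b); // 64-bit lanes: 8 (2*4), 15 (3*5)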
  4639. // Multiply packed double-precision (64-bit) floating-point elements in a and b,
  4640. // and store the results in dst.
  4641. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_pd
  4642. FORCE_INLINE __m128d _mm_mul_pd(__m128d a, __m128d b)
  4643. {
  4644. #if defined(__aarch64__)
  4645. return vreinterpretq_m128d_f64(
  4646. vmulq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  4647. #else
  4648. double *da = (double *) &a;
  4649. double *db = (double *) &b;
  4650. double c[2];
  4651. c[0] = da[0] * db[0];
  4652. c[1] = da[1] * db[1];
  4653. return vld1q_f32((float32_t *) c);
  4654. #endif
  4655. }
  4656. // Multiply the lower double-precision (64-bit) floating-point element in a and
  4657. // b, store the result in the lower element of dst, and copy the upper element
  4658. // from a to the upper element of dst.
  4659. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mul_sd
  4660. FORCE_INLINE __m128d _mm_mul_sd(__m128d a, __m128d b)
  4661. {
  4662. return _mm_move_sd(a, _mm_mul_pd(a, b));
  4663. }
  4664. // Multiply the low unsigned 32-bit integers from a and b, and store the
  4665. // unsigned 64-bit result in dst.
  4666. //
  4667. // dst[63:0] := a[31:0] * b[31:0]
  4668. //
  4669. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_su32
  4670. FORCE_INLINE __m64 _mm_mul_su32(__m64 a, __m64 b)
  4671. {
  4672. return vreinterpret_m64_u64(vget_low_u64(
  4673. vmull_u32(vreinterpret_u32_m64(a), vreinterpret_u32_m64(b))));
  4674. }
  4675. // Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit
  4676. // integers from b.
  4677. //
  4678. // r0 := (a0 * b0)[31:16]
  4679. // r1 := (a1 * b1)[31:16]
  4680. // ...
  4681. // r7 := (a7 * b7)[31:16]
  4682. //
  4683. // https://msdn.microsoft.com/en-us/library/vstudio/59hddw1d(v=vs.100).aspx
  4684. FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
  4685. {
  4686. /* FIXME: issue with large values because of result saturation */
  4687. // int16x8_t ret = vqdmulhq_s16(vreinterpretq_s16_m128i(a),
  4688. // vreinterpretq_s16_m128i(b)); /* =2*a*b */ return
  4689. // vreinterpretq_m128i_s16(vshrq_n_s16(ret, 1));
  4690. int16x4_t a3210 = vget_low_s16(vreinterpretq_s16_m128i(a));
  4691. int16x4_t b3210 = vget_low_s16(vreinterpretq_s16_m128i(b));
  4692. int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
  4693. int16x4_t a7654 = vget_high_s16(vreinterpretq_s16_m128i(a));
  4694. int16x4_t b7654 = vget_high_s16(vreinterpretq_s16_m128i(b));
  4695. int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
  4696. uint16x8x2_t r =
  4697. vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654));
  4698. return vreinterpretq_m128i_u16(r.val[1]);
  4699. }
  4700. // Multiply the packed unsigned 16-bit integers in a and b, producing
  4701. // intermediate 32-bit integers, and store the high 16 bits of the intermediate
  4702. // integers in dst.
  4703. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_epu16
  4704. FORCE_INLINE __m128i _mm_mulhi_epu16(__m128i a, __m128i b)
  4705. {
  4706. uint16x4_t a3210 = vget_low_u16(vreinterpretq_u16_m128i(a));
  4707. uint16x4_t b3210 = vget_low_u16(vreinterpretq_u16_m128i(b));
  4708. uint32x4_t ab3210 = vmull_u16(a3210, b3210);
  4709. #if defined(__aarch64__)
  4710. uint32x4_t ab7654 =
  4711. vmull_high_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
  4712. uint16x8_t r = vuzp2q_u16(vreinterpretq_u16_u32(ab3210),
  4713. vreinterpretq_u16_u32(ab7654));
  4714. return vreinterpretq_m128i_u16(r);
  4715. #else
  4716. uint16x4_t a7654 = vget_high_u16(vreinterpretq_u16_m128i(a));
  4717. uint16x4_t b7654 = vget_high_u16(vreinterpretq_u16_m128i(b));
  4718. uint32x4_t ab7654 = vmull_u16(a7654, b7654);
  4719. uint16x8x2_t r =
  4720. vuzpq_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654));
  4721. return vreinterpretq_m128i_u16(r.val[1]);
  4722. #endif
  4723. }
  4724. // Multiplies the 8 signed or unsigned 16-bit integers from a by the 8 signed or
  4725. // unsigned 16-bit integers from b.
  4726. //
  4727. // r0 := (a0 * b0)[15:0]
  4728. // r1 := (a1 * b1)[15:0]
  4729. // ...
  4730. // r7 := (a7 * b7)[15:0]
  4731. //
  4732. // https://msdn.microsoft.com/en-us/library/vstudio/9ks1472s(v=vs.100).aspx
  4733. FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
  4734. {
  4735. return vreinterpretq_m128i_s16(
  4736. vmulq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  4737. }
  4738. // Compute the bitwise OR of packed double-precision (64-bit) floating-point
  4739. // elements in a and b, and store the results in dst.
  4740. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_or_pd
  4741. FORCE_INLINE __m128d _mm_or_pd(__m128d a, __m128d b)
  4742. {
  4743. return vreinterpretq_m128d_s64(
  4744. vorrq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
  4745. }
  4746. // Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b.
  4747. //
  4748. // r := a | b
  4749. //
  4750. // https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx
  4751. FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
  4752. {
  4753. return vreinterpretq_m128i_s32(
  4754. vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  4755. }
  4756. // Packs the 16 signed 16-bit integers from a and b into 8-bit integers and
  4757. // saturates.
  4758. // https://msdn.microsoft.com/en-us/library/k4y4f7w5%28v=vs.90%29.aspx
  4759. FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
  4760. {
  4761. return vreinterpretq_m128i_s8(
  4762. vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)),
  4763. vqmovn_s16(vreinterpretq_s16_m128i(b))));
  4764. }
  4765. // Packs the 8 signed 32-bit integers from a and b into signed 16-bit integers
  4766. // and saturates.
  4767. //
  4768. // r0 := SignedSaturate(a0)
  4769. // r1 := SignedSaturate(a1)
  4770. // r2 := SignedSaturate(a2)
  4771. // r3 := SignedSaturate(a3)
  4772. // r4 := SignedSaturate(b0)
  4773. // r5 := SignedSaturate(b1)
  4774. // r6 := SignedSaturate(b2)
  4775. // r7 := SignedSaturate(b3)
  4776. //
  4777. // https://msdn.microsoft.com/en-us/library/393t56f9%28v=vs.90%29.aspx
  4778. FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
  4779. {
  4780. return vreinterpretq_m128i_s16(
  4781. vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)),
  4782. vqmovn_s32(vreinterpretq_s32_m128i(b))));
  4783. }
4784. // Packs the 16 signed 16-bit integers from a and b into 8-bit unsigned
4785. // integers and saturates.
  4786. //
  4787. // r0 := UnsignedSaturate(a0)
  4788. // r1 := UnsignedSaturate(a1)
  4789. // ...
  4790. // r7 := UnsignedSaturate(a7)
  4791. // r8 := UnsignedSaturate(b0)
  4792. // r9 := UnsignedSaturate(b1)
  4793. // ...
  4794. // r15 := UnsignedSaturate(b7)
  4795. //
  4796. // https://msdn.microsoft.com/en-us/library/07ad1wx4(v=vs.100).aspx
  4797. FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
  4798. {
  4799. return vreinterpretq_m128i_u8(
  4800. vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)),
  4801. vqmovun_s16(vreinterpretq_s16_m128i(b))));
  4802. }
4803. // Pause the processor. This is typically used in spin-wait loops and, depending
4804. // on the x86 processor, typical delays are in the 40-100 cycle range. The
4805. // 'yield' instruction isn't a good fit because it's effectively a nop on most
4806. // Arm cores. Experience with several databases has shown that an 'isb' is
4807. // a reasonable approximation.
  4808. FORCE_INLINE void _mm_pause()
  4809. {
  4810. __asm__ __volatile__("isb\n");
  4811. }
  4812. // Compute the absolute differences of packed unsigned 8-bit integers in a and
  4813. // b, then horizontally sum each consecutive 8 differences to produce two
  4814. // unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
  4815. // 16 bits of 64-bit elements in dst.
  4816. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sad_epu8
  4817. FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b)
  4818. {
  4819. uint16x8_t t = vpaddlq_u8(vabdq_u8((uint8x16_t) a, (uint8x16_t) b));
  4820. return vreinterpretq_m128i_u64(vpaddlq_u32(vpaddlq_u16(t)));
  4821. }
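// Illustrative usage sketch (not part of the upstream header): with constant
// inputs the absolute differences simply accumulate per 8-byte half.
//
//   __m128i a = _mm_set1_epi8(10);
//   __m128i b = _mm_set1_epi8(7);
//   __m128i r = _mm_sad_epu8(a, b); // both 64-bit lanes == 8 * |10 - 7| == 24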
  4822. // Sets the 8 signed 16-bit integer values.
  4823. // https://msdn.microsoft.com/en-au/library/3e0fek84(v=vs.90).aspx
  4824. FORCE_INLINE __m128i _mm_set_epi16(short i7,
  4825. short i6,
  4826. short i5,
  4827. short i4,
  4828. short i3,
  4829. short i2,
  4830. short i1,
  4831. short i0)
  4832. {
  4833. int16_t ALIGN_STRUCT(16) data[8] = {i0, i1, i2, i3, i4, i5, i6, i7};
  4834. return vreinterpretq_m128i_s16(vld1q_s16(data));
  4835. }
  4836. // Sets the 4 signed 32-bit integer values.
  4837. // https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx
  4838. FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
  4839. {
  4840. int32_t ALIGN_STRUCT(16) data[4] = {i0, i1, i2, i3};
  4841. return vreinterpretq_m128i_s32(vld1q_s32(data));
  4842. }
  4843. // Returns the __m128i structure with its two 64-bit integer values
  4844. // initialized to the values of the two 64-bit integers passed in.
  4845. // https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx
  4846. FORCE_INLINE __m128i _mm_set_epi64(__m64 i1, __m64 i2)
  4847. {
  4848. return _mm_set_epi64x((int64_t) i1, (int64_t) i2);
  4849. }
  4850. // Returns the __m128i structure with its two 64-bit integer values
  4851. // initialized to the values of the two 64-bit integers passed in.
  4852. // https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx
  4853. FORCE_INLINE __m128i _mm_set_epi64x(int64_t i1, int64_t i2)
  4854. {
  4855. return vreinterpretq_m128i_s64(
  4856. vcombine_s64(vcreate_s64(i2), vcreate_s64(i1)));
  4857. }
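// Illustrative usage sketch (not part of the upstream header): the _mm_set_*
// constructors take arguments from the most significant element down to the
// least significant one.
//
//   __m128i v  = _mm_set_epi64x(0x1111, 0x2222);
//   int64_t lo = _mm_cvtsi128_si64(v); // lo == 0x2222 (last argument is lane 0)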
  4858. // Sets the 16 signed 8-bit integer values.
  4859. // https://msdn.microsoft.com/en-us/library/x0cx8zd3(v=vs.90).aspx
  4860. FORCE_INLINE __m128i _mm_set_epi8(signed char b15,
  4861. signed char b14,
  4862. signed char b13,
  4863. signed char b12,
  4864. signed char b11,
  4865. signed char b10,
  4866. signed char b9,
  4867. signed char b8,
  4868. signed char b7,
  4869. signed char b6,
  4870. signed char b5,
  4871. signed char b4,
  4872. signed char b3,
  4873. signed char b2,
  4874. signed char b1,
  4875. signed char b0)
  4876. {
  4877. int8_t ALIGN_STRUCT(16)
  4878. data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
  4879. (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
  4880. (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
  4881. (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
  4882. return (__m128i) vld1q_s8(data);
  4883. }
  4884. // Set packed double-precision (64-bit) floating-point elements in dst with the
  4885. // supplied values.
  4886. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd
  4887. FORCE_INLINE __m128d _mm_set_pd(double e1, double e0)
  4888. {
  4889. double ALIGN_STRUCT(16) data[2] = {e0, e1};
  4890. #if defined(__aarch64__)
  4891. return vreinterpretq_m128d_f64(vld1q_f64((float64_t *) data));
  4892. #else
  4893. return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) data));
  4894. #endif
  4895. }
  4896. // Broadcast double-precision (64-bit) floating-point value a to all elements of
  4897. // dst.
  4898. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd1
  4899. #define _mm_set_pd1 _mm_set1_pd
  4900. // Copy double-precision (64-bit) floating-point element a to the lower element
  4901. // of dst, and zero the upper element.
  4902. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_sd
  4903. FORCE_INLINE __m128d _mm_set_sd(double a)
  4904. {
  4905. #if defined(__aarch64__)
  4906. return vreinterpretq_m128d_f64(vsetq_lane_f64(a, vdupq_n_f64(0), 0));
  4907. #else
  4908. return _mm_set_pd(0, a);
  4909. #endif
  4910. }
  4911. // Sets the 8 signed 16-bit integer values to w.
  4912. //
  4913. // r0 := w
  4914. // r1 := w
  4915. // ...
  4916. // r7 := w
  4917. //
  4918. // https://msdn.microsoft.com/en-us/library/k0ya3x0e(v=vs.90).aspx
  4919. FORCE_INLINE __m128i _mm_set1_epi16(short w)
  4920. {
  4921. return vreinterpretq_m128i_s16(vdupq_n_s16(w));
  4922. }
  4923. // Sets the 4 signed 32-bit integer values to i.
  4924. //
  4925. // r0 := i
  4926. // r1 := i
  4927. // r2 := i
4928. // r3 := i
  4929. //
  4930. // https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx
  4931. FORCE_INLINE __m128i _mm_set1_epi32(int _i)
  4932. {
  4933. return vreinterpretq_m128i_s32(vdupq_n_s32(_i));
  4934. }
  4935. // Sets the 2 signed 64-bit integer values to i.
  4936. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/whtfzhzk(v=vs.100)
  4937. FORCE_INLINE __m128i _mm_set1_epi64(__m64 _i)
  4938. {
  4939. return vreinterpretq_m128i_s64(vdupq_n_s64((int64_t) _i));
  4940. }
  4941. // Sets the 2 signed 64-bit integer values to i.
  4942. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi64x
  4943. FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i)
  4944. {
  4945. return vreinterpretq_m128i_s64(vdupq_n_s64(_i));
  4946. }
  4947. // Sets the 16 signed 8-bit integer values to b.
  4948. //
  4949. // r0 := b
  4950. // r1 := b
  4951. // ...
  4952. // r15 := b
  4953. //
  4954. // https://msdn.microsoft.com/en-us/library/6e14xhyf(v=vs.100).aspx
  4955. FORCE_INLINE __m128i _mm_set1_epi8(signed char w)
  4956. {
  4957. return vreinterpretq_m128i_s8(vdupq_n_s8(w));
  4958. }
  4959. // Broadcast double-precision (64-bit) floating-point value a to all elements of
  4960. // dst.
  4961. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_pd
  4962. FORCE_INLINE __m128d _mm_set1_pd(double d)
  4963. {
  4964. #if defined(__aarch64__)
  4965. return vreinterpretq_m128d_f64(vdupq_n_f64(d));
  4966. #else
  4967. return vreinterpretq_m128d_s64(vdupq_n_s64(*(int64_t *) &d));
  4968. #endif
  4969. }
  4970. // Sets the 8 signed 16-bit integer values in reverse order.
  4971. //
  4972. // Return Value
  4973. // r0 := w0
  4974. // r1 := w1
  4975. // ...
  4976. // r7 := w7
  4977. FORCE_INLINE __m128i _mm_setr_epi16(short w0,
  4978. short w1,
  4979. short w2,
  4980. short w3,
  4981. short w4,
  4982. short w5,
  4983. short w6,
  4984. short w7)
  4985. {
  4986. int16_t ALIGN_STRUCT(16) data[8] = {w0, w1, w2, w3, w4, w5, w6, w7};
  4987. return vreinterpretq_m128i_s16(vld1q_s16((int16_t *) data));
  4988. }
  4989. // Sets the 4 signed 32-bit integer values in reverse order
  4990. // https://technet.microsoft.com/en-us/library/security/27yb3ee5(v=vs.90).aspx
  4991. FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0)
  4992. {
  4993. int32_t ALIGN_STRUCT(16) data[4] = {i3, i2, i1, i0};
  4994. return vreinterpretq_m128i_s32(vld1q_s32(data));
  4995. }
  4996. // Set packed 64-bit integers in dst with the supplied values in reverse order.
  4997. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi64
  4998. FORCE_INLINE __m128i _mm_setr_epi64(__m64 e1, __m64 e0)
  4999. {
  5000. return vreinterpretq_m128i_s64(vcombine_s64(e1, e0));
  5001. }
  5002. // Sets the 16 signed 8-bit integer values in reverse order.
  5003. // https://msdn.microsoft.com/en-us/library/2khb9c7k(v=vs.90).aspx
  5004. FORCE_INLINE __m128i _mm_setr_epi8(signed char b0,
  5005. signed char b1,
  5006. signed char b2,
  5007. signed char b3,
  5008. signed char b4,
  5009. signed char b5,
  5010. signed char b6,
  5011. signed char b7,
  5012. signed char b8,
  5013. signed char b9,
  5014. signed char b10,
  5015. signed char b11,
  5016. signed char b12,
  5017. signed char b13,
  5018. signed char b14,
  5019. signed char b15)
  5020. {
  5021. int8_t ALIGN_STRUCT(16)
  5022. data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
  5023. (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
  5024. (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
  5025. (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
  5026. return (__m128i) vld1q_s8(data);
  5027. }
  5028. // Set packed double-precision (64-bit) floating-point elements in dst with the
  5029. // supplied values in reverse order.
  5030. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_pd
  5031. FORCE_INLINE __m128d _mm_setr_pd(double e1, double e0)
  5032. {
  5033. return _mm_set_pd(e0, e1);
  5034. }
  5035. // Return vector of type __m128d with all elements set to zero.
  5036. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_pd
  5037. FORCE_INLINE __m128d _mm_setzero_pd(void)
  5038. {
  5039. #if defined(__aarch64__)
  5040. return vreinterpretq_m128d_f64(vdupq_n_f64(0));
  5041. #else
  5042. return vreinterpretq_m128d_f32(vdupq_n_f32(0));
  5043. #endif
  5044. }
  5045. // Sets the 128-bit value to zero
  5046. // https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx
  5047. FORCE_INLINE __m128i _mm_setzero_si128(void)
  5048. {
  5049. return vreinterpretq_m128i_s32(vdupq_n_s32(0));
  5050. }
  5051. // Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm.
  5052. // https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx
  5053. // FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a,
  5054. // __constrange(0,255) int imm)
  5055. #ifdef _sse2neon_shuffle
  5056. #define _mm_shuffle_epi32(a, imm) \
  5057. __extension__({ \
  5058. int32x4_t _input = vreinterpretq_s32_m128i(a); \
  5059. int32x4_t _shuf = \
  5060. vshuffleq_s32(_input, _input, (imm) & (0x3), ((imm) >> 2) & 0x3, \
  5061. ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); \
  5062. vreinterpretq_m128i_s32(_shuf); \
  5063. })
  5064. #else // generic
  5065. #define _mm_shuffle_epi32(a, imm) \
  5066. __extension__({ \
  5067. __m128i ret; \
  5068. switch (imm) { \
  5069. case _MM_SHUFFLE(1, 0, 3, 2): \
  5070. ret = _mm_shuffle_epi_1032((a)); \
  5071. break; \
  5072. case _MM_SHUFFLE(2, 3, 0, 1): \
  5073. ret = _mm_shuffle_epi_2301((a)); \
  5074. break; \
  5075. case _MM_SHUFFLE(0, 3, 2, 1): \
  5076. ret = _mm_shuffle_epi_0321((a)); \
  5077. break; \
  5078. case _MM_SHUFFLE(2, 1, 0, 3): \
  5079. ret = _mm_shuffle_epi_2103((a)); \
  5080. break; \
  5081. case _MM_SHUFFLE(1, 0, 1, 0): \
  5082. ret = _mm_shuffle_epi_1010((a)); \
  5083. break; \
  5084. case _MM_SHUFFLE(1, 0, 0, 1): \
  5085. ret = _mm_shuffle_epi_1001((a)); \
  5086. break; \
  5087. case _MM_SHUFFLE(0, 1, 0, 1): \
  5088. ret = _mm_shuffle_epi_0101((a)); \
  5089. break; \
  5090. case _MM_SHUFFLE(2, 2, 1, 1): \
  5091. ret = _mm_shuffle_epi_2211((a)); \
  5092. break; \
  5093. case _MM_SHUFFLE(0, 1, 2, 2): \
  5094. ret = _mm_shuffle_epi_0122((a)); \
  5095. break; \
  5096. case _MM_SHUFFLE(3, 3, 3, 2): \
  5097. ret = _mm_shuffle_epi_3332((a)); \
  5098. break; \
  5099. case _MM_SHUFFLE(0, 0, 0, 0): \
  5100. ret = _mm_shuffle_epi32_splat((a), 0); \
  5101. break; \
  5102. case _MM_SHUFFLE(1, 1, 1, 1): \
  5103. ret = _mm_shuffle_epi32_splat((a), 1); \
  5104. break; \
  5105. case _MM_SHUFFLE(2, 2, 2, 2): \
  5106. ret = _mm_shuffle_epi32_splat((a), 2); \
  5107. break; \
  5108. case _MM_SHUFFLE(3, 3, 3, 3): \
  5109. ret = _mm_shuffle_epi32_splat((a), 3); \
  5110. break; \
  5111. default: \
  5112. ret = _mm_shuffle_epi32_default((a), (imm)); \
  5113. break; \
  5114. } \
  5115. ret; \
  5116. })
  5117. #endif
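// Illustrative usage sketch (not part of the upstream header): _MM_SHUFFLE
// lists the source lanes from the highest destination lane down to the lowest.
//
//   __m128i v   = _mm_setr_epi32(10, 11, 12, 13);
//   __m128i rev = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3)); // 13,12,11,10
//   __m128i bc0 = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0)); // 10,10,10,10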
  5118. // Shuffle double-precision (64-bit) floating-point elements using the control
  5119. // in imm8, and store the results in dst.
  5120. //
  5121. // dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
  5122. // dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
  5123. //
  5124. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pd
  5125. #ifdef _sse2neon_shuffle
  5126. #define _mm_shuffle_pd(a, b, imm8) \
  5127. vreinterpretq_m128d_s64( \
  5128. vshuffleq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b), \
  5129. imm8 & 0x1, ((imm8 & 0x2) >> 1) + 2))
  5130. #else
  5131. #define _mm_shuffle_pd(a, b, imm8) \
  5132. _mm_castsi128_pd(_mm_set_epi64x( \
  5133. vgetq_lane_s64(vreinterpretq_s64_m128d(b), (imm8 & 0x2) >> 1), \
  5134. vgetq_lane_s64(vreinterpretq_s64_m128d(a), imm8 & 0x1)))
  5135. #endif
  5136. // FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a,
  5137. // __constrange(0,255) int imm)
  5138. #ifdef _sse2neon_shuffle
  5139. #define _mm_shufflehi_epi16(a, imm) \
  5140. __extension__({ \
  5141. int16x8_t _input = vreinterpretq_s16_m128i(a); \
  5142. int16x8_t _shuf = \
  5143. vshuffleq_s16(_input, _input, 0, 1, 2, 3, ((imm) & (0x3)) + 4, \
  5144. (((imm) >> 2) & 0x3) + 4, (((imm) >> 4) & 0x3) + 4, \
  5145. (((imm) >> 6) & 0x3) + 4); \
  5146. vreinterpretq_m128i_s16(_shuf); \
  5147. })
  5148. #else // generic
  5149. #define _mm_shufflehi_epi16(a, imm) _mm_shufflehi_epi16_function((a), (imm))
  5150. #endif
  5151. // FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a,
  5152. // __constrange(0,255) int imm)
  5153. #ifdef _sse2neon_shuffle
  5154. #define _mm_shufflelo_epi16(a, imm) \
  5155. __extension__({ \
  5156. int16x8_t _input = vreinterpretq_s16_m128i(a); \
  5157. int16x8_t _shuf = vshuffleq_s16( \
  5158. _input, _input, ((imm) & (0x3)), (((imm) >> 2) & 0x3), \
  5159. (((imm) >> 4) & 0x3), (((imm) >> 6) & 0x3), 4, 5, 6, 7); \
  5160. vreinterpretq_m128i_s16(_shuf); \
  5161. })
  5162. #else // generic
  5163. #define _mm_shufflelo_epi16(a, imm) _mm_shufflelo_epi16_function((a), (imm))
  5164. #endif
  5165. // Shift packed 16-bit integers in a left by count while shifting in zeros, and
  5166. // store the results in dst.
  5167. //
  5168. // FOR j := 0 to 7
  5169. // i := j*16
  5170. // IF count[63:0] > 15
  5171. // dst[i+15:i] := 0
  5172. // ELSE
  5173. // dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0])
  5174. // FI
  5175. // ENDFOR
  5176. //
  5177. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi16
  5178. FORCE_INLINE __m128i _mm_sll_epi16(__m128i a, __m128i count)
  5179. {
  5180. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  5181. if (_sse2neon_unlikely(c & ~15))
  5182. return _mm_setzero_si128();
  5183. int16x8_t vc = vdupq_n_s16((int16_t) c);
  5184. return vreinterpretq_m128i_s16(vshlq_s16(vreinterpretq_s16_m128i(a), vc));
  5185. }
  5186. // Shift packed 32-bit integers in a left by count while shifting in zeros, and
  5187. // store the results in dst.
  5188. //
  5189. // FOR j := 0 to 3
  5190. // i := j*32
  5191. // IF count[63:0] > 31
  5192. // dst[i+31:i] := 0
  5193. // ELSE
  5194. // dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0])
  5195. // FI
  5196. // ENDFOR
  5197. //
  5198. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi32
  5199. FORCE_INLINE __m128i _mm_sll_epi32(__m128i a, __m128i count)
  5200. {
  5201. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  5202. if (_sse2neon_unlikely(c & ~31))
  5203. return _mm_setzero_si128();
  5204. int32x4_t vc = vdupq_n_s32((int32_t) c);
  5205. return vreinterpretq_m128i_s32(vshlq_s32(vreinterpretq_s32_m128i(a), vc));
  5206. }
  5207. // Shift packed 64-bit integers in a left by count while shifting in zeros, and
  5208. // store the results in dst.
  5209. //
  5210. // FOR j := 0 to 1
  5211. // i := j*64
  5212. // IF count[63:0] > 63
  5213. // dst[i+63:i] := 0
  5214. // ELSE
  5215. // dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0])
  5216. // FI
  5217. // ENDFOR
  5218. //
  5219. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi64
  5220. FORCE_INLINE __m128i _mm_sll_epi64(__m128i a, __m128i count)
  5221. {
  5222. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  5223. if (_sse2neon_unlikely(c & ~63))
  5224. return _mm_setzero_si128();
  5225. int64x2_t vc = vdupq_n_s64((int64_t) c);
  5226. return vreinterpretq_m128i_s64(vshlq_s64(vreinterpretq_s64_m128i(a), vc));
  5227. }
  5228. // Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and
  5229. // store the results in dst.
  5230. //
  5231. // FOR j := 0 to 7
  5232. // i := j*16
  5233. // IF imm8[7:0] > 15
  5234. // dst[i+15:i] := 0
  5235. // ELSE
  5236. // dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0])
  5237. // FI
  5238. // ENDFOR
  5239. //
  5240. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi16
  5241. FORCE_INLINE __m128i _mm_slli_epi16(__m128i a, int imm)
  5242. {
  5243. if (_sse2neon_unlikely(imm & ~15))
  5244. return _mm_setzero_si128();
  5245. return vreinterpretq_m128i_s16(
  5246. vshlq_s16(vreinterpretq_s16_m128i(a), vdupq_n_s16(imm)));
  5247. }
  5248. // Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and
  5249. // store the results in dst.
  5250. //
  5251. // FOR j := 0 to 3
  5252. // i := j*32
  5253. // IF imm8[7:0] > 31
  5254. // dst[i+31:i] := 0
  5255. // ELSE
  5256. // dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0])
  5257. // FI
  5258. // ENDFOR
  5259. //
  5260. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi32
  5261. FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, int imm)
  5262. {
  5263. if (_sse2neon_unlikely(imm & ~31))
  5264. return _mm_setzero_si128();
  5265. return vreinterpretq_m128i_s32(
  5266. vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(imm)));
  5267. }
  5268. // Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and
  5269. // store the results in dst.
  5270. //
  5271. // FOR j := 0 to 1
  5272. // i := j*64
  5273. // IF imm8[7:0] > 63
  5274. // dst[i+63:i] := 0
  5275. // ELSE
  5276. // dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0])
  5277. // FI
  5278. // ENDFOR
  5279. //
  5280. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi64
  5281. FORCE_INLINE __m128i _mm_slli_epi64(__m128i a, int imm)
  5282. {
  5283. if (_sse2neon_unlikely(imm & ~63))
  5284. return _mm_setzero_si128();
  5285. return vreinterpretq_m128i_s64(
  5286. vshlq_s64(vreinterpretq_s64_m128i(a), vdupq_n_s64(imm)));
  5287. }
  5288. // Shift a left by imm8 bytes while shifting in zeros, and store the results in
  5289. // dst.
  5290. //
  5291. // tmp := imm8[7:0]
  5292. // IF tmp > 15
  5293. // tmp := 16
  5294. // FI
  5295. // dst[127:0] := a[127:0] << (tmp*8)
  5296. //
  5297. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_si128
  5298. #define _mm_slli_si128(a, imm) \
  5299. __extension__({ \
  5300. int8x16_t ret; \
  5301. if (_sse2neon_unlikely(imm == 0)) \
  5302. ret = vreinterpretq_s8_m128i(a); \
  5303. else if (_sse2neon_unlikely((imm) & ~15)) \
  5304. ret = vdupq_n_s8(0); \
  5305. else \
  5306. ret = vextq_s8(vdupq_n_s8(0), vreinterpretq_s8_m128i(a), \
  5307. ((imm <= 0 || imm > 15) ? 0 : (16 - imm))); \
  5308. vreinterpretq_m128i_s8(ret); \
  5309. })
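// Illustrative usage sketch (not part of the upstream header): the shift is by
// whole bytes, not bits, and imm must be a compile-time constant.
//
//   __m128i v = _mm_cvtsi32_si128(0xAA);
//   __m128i r = _mm_slli_si128(v, 1);  // low 32-bit lane becomes 0xAA00
//   __m128i z = _mm_slli_si128(v, 16); // all zeros (shift count > 15)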
  5310. // Compute the square root of packed double-precision (64-bit) floating-point
  5311. // elements in a, and store the results in dst.
  5312. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_pd
  5313. FORCE_INLINE __m128d _mm_sqrt_pd(__m128d a)
  5314. {
  5315. #if defined(__aarch64__)
  5316. return vreinterpretq_m128d_f64(vsqrtq_f64(vreinterpretq_f64_m128d(a)));
  5317. #else
  5318. double a0 = sqrt(((double *) &a)[0]);
  5319. double a1 = sqrt(((double *) &a)[1]);
  5320. return _mm_set_pd(a1, a0);
  5321. #endif
  5322. }
  5323. // Compute the square root of the lower double-precision (64-bit) floating-point
  5324. // element in b, store the result in the lower element of dst, and copy the
  5325. // upper element from a to the upper element of dst.
  5326. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_sd
  5327. FORCE_INLINE __m128d _mm_sqrt_sd(__m128d a, __m128d b)
  5328. {
  5329. #if defined(__aarch64__)
  5330. return _mm_move_sd(a, _mm_sqrt_pd(b));
  5331. #else
  5332. return _mm_set_pd(((double *) &a)[1], sqrt(((double *) &b)[0]));
  5333. #endif
  5334. }
  5335. // Shift packed 16-bit integers in a right by count while shifting in sign bits,
  5336. // and store the results in dst.
  5337. //
  5338. // FOR j := 0 to 7
  5339. // i := j*16
  5340. // IF count[63:0] > 15
  5341. // dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
  5342. // ELSE
  5343. // dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0])
  5344. // FI
  5345. // ENDFOR
  5346. //
  5347. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi16
  5348. FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count)
  5349. {
  5350. int64_t c = (int64_t) vget_low_s64((int64x2_t) count);
  5351. if (_sse2neon_unlikely(c & ~15))
  5352. return _mm_cmplt_epi16(a, _mm_setzero_si128());
  5353. return vreinterpretq_m128i_s16(vshlq_s16((int16x8_t) a, vdupq_n_s16(-c)));
  5354. }
  5355. // Shift packed 32-bit integers in a right by count while shifting in sign bits,
  5356. // and store the results in dst.
  5357. //
  5358. // FOR j := 0 to 3
  5359. // i := j*32
  5360. // IF count[63:0] > 31
  5361. // dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
  5362. // ELSE
  5363. // dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0])
  5364. // FI
  5365. // ENDFOR
  5366. //
  5367. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi32
  5368. FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count)
  5369. {
  5370. int64_t c = (int64_t) vget_low_s64((int64x2_t) count);
  5371. if (_sse2neon_unlikely(c & ~31))
  5372. return _mm_cmplt_epi32(a, _mm_setzero_si128());
  5373. return vreinterpretq_m128i_s32(vshlq_s32((int32x4_t) a, vdupq_n_s32(-c)));
  5374. }
  5375. // Shift packed 16-bit integers in a right by imm8 while shifting in sign
  5376. // bits, and store the results in dst.
  5377. //
  5378. // FOR j := 0 to 7
  5379. // i := j*16
  5380. // IF imm8[7:0] > 15
  5381. // dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
  5382. // ELSE
  5383. // dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0])
  5384. // FI
  5385. // ENDFOR
  5386. //
  5387. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi16
  5388. FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int imm)
  5389. {
  5390. const int count = (imm & ~15) ? 15 : imm;
  5391. return (__m128i) vshlq_s16((int16x8_t) a, vdupq_n_s16(-count));
  5392. }
  5393. // Shift packed 32-bit integers in a right by imm8 while shifting in sign bits,
  5394. // and store the results in dst.
  5395. //
  5396. // FOR j := 0 to 3
  5397. // i := j*32
  5398. // IF imm8[7:0] > 31
  5399. // dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
  5400. // ELSE
  5401. // dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0])
  5402. // FI
  5403. // ENDFOR
  5404. //
  5405. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi32
  5406. // FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm)
  5407. #define _mm_srai_epi32(a, imm) \
  5408. __extension__({ \
  5409. __m128i ret; \
  5410. if (_sse2neon_unlikely((imm) == 0)) { \
  5411. ret = a; \
  5412. } else if (_sse2neon_likely(0 < (imm) && (imm) < 32)) { \
  5413. ret = vreinterpretq_m128i_s32( \
  5414. vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(-(imm)))); \
  5415. } else { \
  5416. ret = vreinterpretq_m128i_s32( \
  5417. vshrq_n_s32(vreinterpretq_s32_m128i(a), 31)); \
  5418. } \
  5419. ret; \
  5420. })
  5421. // Shift packed 16-bit integers in a right by count while shifting in zeros, and
  5422. // store the results in dst.
  5423. //
  5424. // FOR j := 0 to 7
  5425. // i := j*16
  5426. // IF count[63:0] > 15
  5427. // dst[i+15:i] := 0
  5428. // ELSE
  5429. // dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0])
  5430. // FI
  5431. // ENDFOR
  5432. //
  5433. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi16
  5434. FORCE_INLINE __m128i _mm_srl_epi16(__m128i a, __m128i count)
  5435. {
  5436. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  5437. if (_sse2neon_unlikely(c & ~15))
  5438. return _mm_setzero_si128();
  5439. int16x8_t vc = vdupq_n_s16(-(int16_t) c);
  5440. return vreinterpretq_m128i_u16(vshlq_u16(vreinterpretq_u16_m128i(a), vc));
  5441. }
  5442. // Shift packed 32-bit integers in a right by count while shifting in zeros, and
  5443. // store the results in dst.
  5444. //
  5445. // FOR j := 0 to 3
  5446. // i := j*32
  5447. // IF count[63:0] > 31
  5448. // dst[i+31:i] := 0
  5449. // ELSE
  5450. // dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0])
  5451. // FI
  5452. // ENDFOR
  5453. //
  5454. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi32
  5455. FORCE_INLINE __m128i _mm_srl_epi32(__m128i a, __m128i count)
  5456. {
  5457. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  5458. if (_sse2neon_unlikely(c & ~31))
  5459. return _mm_setzero_si128();
  5460. int32x4_t vc = vdupq_n_s32(-(int32_t) c);
  5461. return vreinterpretq_m128i_u32(vshlq_u32(vreinterpretq_u32_m128i(a), vc));
  5462. }
  5463. // Shift packed 64-bit integers in a right by count while shifting in zeros, and
  5464. // store the results in dst.
  5465. //
  5466. // FOR j := 0 to 1
  5467. // i := j*64
  5468. // IF count[63:0] > 63
  5469. // dst[i+63:i] := 0
  5470. // ELSE
  5471. // dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0])
  5472. // FI
  5473. // ENDFOR
  5474. //
  5475. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi64
  5476. FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count)
  5477. {
  5478. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  5479. if (_sse2neon_unlikely(c & ~63))
  5480. return _mm_setzero_si128();
  5481. int64x2_t vc = vdupq_n_s64(-(int64_t) c);
  5482. return vreinterpretq_m128i_u64(vshlq_u64(vreinterpretq_u64_m128i(a), vc));
  5483. }
  5484. // Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and
  5485. // store the results in dst.
  5486. //
  5487. // FOR j := 0 to 7
  5488. // i := j*16
  5489. // IF imm8[7:0] > 15
  5490. // dst[i+15:i] := 0
  5491. // ELSE
  5492. // dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0])
  5493. // FI
  5494. // ENDFOR
  5495. //
  5496. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi16
  5497. #define _mm_srli_epi16(a, imm) \
  5498. __extension__({ \
  5499. __m128i ret; \
  5500. if (_sse2neon_unlikely((imm) & ~15)) { \
  5501. ret = _mm_setzero_si128(); \
  5502. } else { \
  5503. ret = vreinterpretq_m128i_u16( \
  5504. vshlq_u16(vreinterpretq_u16_m128i(a), vdupq_n_s16(-(imm)))); \
  5505. } \
  5506. ret; \
  5507. })
  5508. // Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and
  5509. // store the results in dst.
  5510. //
  5511. // FOR j := 0 to 3
  5512. // i := j*32
  5513. // IF imm8[7:0] > 31
  5514. // dst[i+31:i] := 0
  5515. // ELSE
  5516. // dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0])
  5517. // FI
  5518. // ENDFOR
  5519. //
  5520. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi32
  5521. // FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm)
  5522. #define _mm_srli_epi32(a, imm) \
  5523. __extension__({ \
  5524. __m128i ret; \
  5525. if (_sse2neon_unlikely((imm) & ~31)) { \
  5526. ret = _mm_setzero_si128(); \
  5527. } else { \
  5528. ret = vreinterpretq_m128i_u32( \
  5529. vshlq_u32(vreinterpretq_u32_m128i(a), vdupq_n_s32(-(imm)))); \
  5530. } \
  5531. ret; \
  5532. })
  5533. // Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and
  5534. // store the results in dst.
  5535. //
  5536. // FOR j := 0 to 1
  5537. // i := j*64
  5538. // IF imm8[7:0] > 63
  5539. // dst[i+63:i] := 0
  5540. // ELSE
  5541. // dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0])
  5542. // FI
  5543. // ENDFOR
  5544. //
  5545. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi64
  5546. #define _mm_srli_epi64(a, imm) \
  5547. __extension__({ \
  5548. __m128i ret; \
  5549. if (_sse2neon_unlikely((imm) & ~63)) { \
  5550. ret = _mm_setzero_si128(); \
  5551. } else { \
  5552. ret = vreinterpretq_m128i_u64( \
  5553. vshlq_u64(vreinterpretq_u64_m128i(a), vdupq_n_s64(-(imm)))); \
  5554. } \
  5555. ret; \
  5556. })
  5557. // Shift a right by imm8 bytes while shifting in zeros, and store the results in
  5558. // dst.
  5559. //
  5560. // tmp := imm8[7:0]
  5561. // IF tmp > 15
  5562. // tmp := 16
  5563. // FI
  5564. // dst[127:0] := a[127:0] >> (tmp*8)
  5565. //
  5566. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_si128
  5567. #define _mm_srli_si128(a, imm) \
  5568. __extension__({ \
  5569. int8x16_t ret; \
  5570. if (_sse2neon_unlikely((imm) & ~15)) \
  5571. ret = vdupq_n_s8(0); \
  5572. else \
  5573. ret = vextq_s8(vreinterpretq_s8_m128i(a), vdupq_n_s8(0), \
  5574. (imm > 15 ? 0 : imm)); \
  5575. vreinterpretq_m128i_s8(ret); \
  5576. })
  5577. // Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
  5578. // elements) from a into memory. mem_addr must be aligned on a 16-byte boundary
  5579. // or a general-protection exception may be generated.
  5580. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd
  5581. FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a)
  5582. {
  5583. #if defined(__aarch64__)
  5584. vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(a));
  5585. #else
  5586. vst1q_f32((float32_t *) mem_addr, vreinterpretq_f32_m128d(a));
  5587. #endif
  5588. }
  5589. // Store the lower double-precision (64-bit) floating-point element from a into
  5590. // 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
  5591. // boundary or a general-protection exception may be generated.
  5592. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd1
  5593. FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a)
  5594. {
  5595. #if defined(__aarch64__)
  5596. float64x1_t a_low = vget_low_f64(vreinterpretq_f64_m128d(a));
  5597. vst1q_f64((float64_t *) mem_addr,
  5598. vreinterpretq_f64_m128d(vcombine_f64(a_low, a_low)));
  5599. #else
  5600. float32x2_t a_low = vget_low_f32(vreinterpretq_f32_m128d(a));
  5601. vst1q_f32((float32_t *) mem_addr,
  5602. vreinterpretq_f32_m128d(vcombine_f32(a_low, a_low)));
  5603. #endif
  5604. }
  5605. // Store the lower double-precision (64-bit) floating-point element from a into
  5606. // memory. mem_addr does not need to be aligned on any particular boundary.
  5607. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_store_sd
  5608. FORCE_INLINE void _mm_store_sd(double *mem_addr, __m128d a)
  5609. {
  5610. #if defined(__aarch64__)
  5611. vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
  5612. #else
  5613. vst1_u64((uint64_t *) mem_addr, vget_low_u64(vreinterpretq_u64_m128d(a)));
  5614. #endif
  5615. }
  5616. // Stores four 32-bit integer values as (as a __m128i value) at the address p.
  5617. // https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
  5618. FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
  5619. {
  5620. vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
  5621. }
  5622. // Store the lower double-precision (64-bit) floating-point element from a into
  5623. // 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
  5624. // boundary or a general-protection exception may be generated.
  5625. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#expand=9,526,5601&text=_mm_store1_pd
  5626. #define _mm_store1_pd _mm_store_pd1
  5627. // Store the upper double-precision (64-bit) floating-point element from a into
  5628. // memory.
  5629. //
  5630. // MEM[mem_addr+63:mem_addr] := a[127:64]
  5631. //
  5632. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeh_pd
  5633. FORCE_INLINE void _mm_storeh_pd(double *mem_addr, __m128d a)
  5634. {
  5635. #if defined(__aarch64__)
  5636. vst1_f64((float64_t *) mem_addr, vget_high_f64(vreinterpretq_f64_m128d(a)));
  5637. #else
  5638. vst1_f32((float32_t *) mem_addr, vget_high_f32(vreinterpretq_f32_m128d(a)));
  5639. #endif
  5640. }
  5641. // Reads the lower 64 bits of b and stores them into the lower 64 bits of a.
  5642. // https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx
  5643. FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b)
  5644. {
  5645. vst1_u64((uint64_t *) a, vget_low_u64(vreinterpretq_u64_m128i(b)));
  5646. }
  5647. // Store the lower double-precision (64-bit) floating-point element from a into
  5648. // memory.
  5649. //
  5650. // MEM[mem_addr+63:mem_addr] := a[63:0]
  5651. //
  5652. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_pd
  5653. FORCE_INLINE void _mm_storel_pd(double *mem_addr, __m128d a)
  5654. {
  5655. #if defined(__aarch64__)
  5656. vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
  5657. #else
  5658. vst1_f32((float32_t *) mem_addr, vget_low_f32(vreinterpretq_f32_m128d(a)));
  5659. #endif
  5660. }
  5661. // Store 2 double-precision (64-bit) floating-point elements from a into memory
  5662. // in reverse order. mem_addr must be aligned on a 16-byte boundary or a
  5663. // general-protection exception may be generated.
  5664. //
  5665. // MEM[mem_addr+63:mem_addr] := a[127:64]
  5666. // MEM[mem_addr+127:mem_addr+64] := a[63:0]
  5667. //
  5668. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_pd
  5669. FORCE_INLINE void _mm_storer_pd(double *mem_addr, __m128d a)
  5670. {
  5671. float32x4_t f = vreinterpretq_f32_m128d(a);
  5672. _mm_store_pd(mem_addr, vreinterpretq_m128d_f32(vextq_f32(f, f, 2)));
  5673. }
  5674. // Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
  5675. // elements) from a into memory. mem_addr does not need to be aligned on any
  5676. // particular boundary.
  5677. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_pd
  5678. FORCE_INLINE void _mm_storeu_pd(double *mem_addr, __m128d a)
  5679. {
  5680. _mm_store_pd(mem_addr, a);
  5681. }
  5682. // Stores 128-bits of integer data a at the address p.
  5683. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si128
  5684. FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
  5685. {
  5686. vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
  5687. }
  5688. // Stores 32-bits of integer data a at the address p.
  5689. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si32
  5690. FORCE_INLINE void _mm_storeu_si32(void *p, __m128i a)
  5691. {
  5692. vst1q_lane_s32((int32_t *) p, vreinterpretq_s32_m128i(a), 0);
  5693. }
  5694. // Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
  5695. // elements) from a into memory using a non-temporal memory hint. mem_addr must
  5696. // be aligned on a 16-byte boundary or a general-protection exception may be
  5697. // generated.
  5698. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pd
  5699. FORCE_INLINE void _mm_stream_pd(double *p, __m128d a)
  5700. {
  5701. #if __has_builtin(__builtin_nontemporal_store)
  5702. __builtin_nontemporal_store(reinterpret_cast<float32x4_t>(a), (float32x4_t *) p);
  5703. #elif defined(__aarch64__)
  5704. vst1q_f64(p, vreinterpretq_f64_m128d(a));
  5705. #else
  5706. vst1q_s64((int64_t *) p, vreinterpretq_s64_m128d(a));
  5707. #endif
  5708. }
  5709. // Stores the data in a to the address p without polluting the caches. If the
  5710. // cache line containing address p is already in the cache, the cache will be
  5711. // updated.
  5712. // https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx
  5713. FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a)
  5714. {
  5715. #if __has_builtin(__builtin_nontemporal_store)
  5716. __builtin_nontemporal_store(a, p);
  5717. #else
  5718. vst1q_s64((int64_t *) p, vreinterpretq_s64_m128i(a));
  5719. #endif
  5720. }
  5721. // Store 32-bit integer a into memory using a non-temporal hint to minimize
  5722. // cache pollution. If the cache line containing address mem_addr is already in
  5723. // the cache, the cache will be updated.
  5724. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si32
  5725. FORCE_INLINE void _mm_stream_si32(int *p, int a)
  5726. {
  5727. vst1q_lane_s32((int32_t *) p, vdupq_n_s32(a), 0);
  5728. }
  5729. // Store 64-bit integer a into memory using a non-temporal hint to minimize
  5730. // cache pollution. If the cache line containing address mem_addr is already in
  5731. // the cache, the cache will be updated.
  5732. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si64
  5733. FORCE_INLINE void _mm_stream_si64(__int64 *p, __int64 a)
  5734. {
  5735. vst1_s64((int64_t *) p, vdup_n_s64((int64_t) a));
  5736. }
  5737. // Subtract packed 16-bit integers in b from packed 16-bit integers in a, and
  5738. // store the results in dst.
  5739. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi16
  5740. FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b)
  5741. {
  5742. return vreinterpretq_m128i_s16(
  5743. vsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  5744. }
  5745. // Subtracts the 4 signed or unsigned 32-bit integers of b from the 4 signed or
  5746. // unsigned 32-bit integers of a.
  5747. //
  5748. // r0 := a0 - b0
  5749. // r1 := a1 - b1
  5750. // r2 := a2 - b2
  5751. // r3 := a3 - b3
  5752. //
  5753. // https://msdn.microsoft.com/en-us/library/vstudio/fhh866h0(v=vs.100).aspx
  5754. FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
  5755. {
  5756. return vreinterpretq_m128i_s32(
  5757. vsubq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  5758. }
  5759. // Subtract 2 packed 64-bit integers in b from 2 packed 64-bit integers in a,
  5760. // and store the results in dst.
  5761. // r0 := a0 - b0
  5762. // r1 := a1 - b1
  5763. FORCE_INLINE __m128i _mm_sub_epi64(__m128i a, __m128i b)
  5764. {
  5765. return vreinterpretq_m128i_s64(
  5766. vsubq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
  5767. }
  5768. // Subtract packed 8-bit integers in b from packed 8-bit integers in a, and
  5769. // store the results in dst.
  5770. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi8
  5771. FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b)
  5772. {
  5773. return vreinterpretq_m128i_s8(
  5774. vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  5775. }
  5776. // Subtract packed double-precision (64-bit) floating-point elements in b from
  5777. // packed double-precision (64-bit) floating-point elements in a, and store the
  5778. // results in dst.
  5779. //
  5780. // FOR j := 0 to 1
  5781. // i := j*64
  5782. // dst[i+63:i] := a[i+63:i] - b[i+63:i]
  5783. // ENDFOR
  5784. //
  5785. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sub_pd
  5786. FORCE_INLINE __m128d _mm_sub_pd(__m128d a, __m128d b)
  5787. {
  5788. #if defined(__aarch64__)
  5789. return vreinterpretq_m128d_f64(
  5790. vsubq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  5791. #else
  5792. double *da = (double *) &a;
  5793. double *db = (double *) &b;
  5794. double c[2];
  5795. c[0] = da[0] - db[0];
  5796. c[1] = da[1] - db[1];
  5797. return vld1q_f32((float32_t *) c);
  5798. #endif
  5799. }
  5800. // Subtract the lower double-precision (64-bit) floating-point element in b from
  5801. // the lower double-precision (64-bit) floating-point element in a, store the
  5802. // result in the lower element of dst, and copy the upper element from a to the
  5803. // upper element of dst.
  5804. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_sd
  5805. FORCE_INLINE __m128d _mm_sub_sd(__m128d a, __m128d b)
  5806. {
  5807. return _mm_move_sd(a, _mm_sub_pd(a, b));
  5808. }
  5809. // Subtract 64-bit integer b from 64-bit integer a, and store the result in dst.
  5810. //
  5811. // dst[63:0] := a[63:0] - b[63:0]
  5812. //
  5813. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_si64
  5814. FORCE_INLINE __m64 _mm_sub_si64(__m64 a, __m64 b)
  5815. {
  5816. return vreinterpret_m64_s64(
  5817. vsub_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
  5818. }
  5819. // Subtracts the 8 signed 16-bit integers of b from the 8 signed 16-bit integers
  5820. // of a and saturates.
  5821. //
  5822. // r0 := SignedSaturate(a0 - b0)
  5823. // r1 := SignedSaturate(a1 - b1)
  5824. // ...
  5825. // r7 := SignedSaturate(a7 - b7)
  5826. //
  5827. // https://technet.microsoft.com/en-us/subscriptions/3247z5b8(v=vs.90)
  5828. FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b)
  5829. {
  5830. return vreinterpretq_m128i_s16(
  5831. vqsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  5832. }
  5833. // Subtracts the 16 signed 8-bit integers of b from the 16 signed 8-bit integers
  5834. // of a and saturates.
  5835. //
  5836. // r0 := SignedSaturate(a0 - b0)
  5837. // r1 := SignedSaturate(a1 - b1)
  5838. // ...
  5839. // r15 := SignedSaturate(a15 - b15)
  5840. //
  5841. // https://technet.microsoft.com/en-us/subscriptions/by7kzks1(v=vs.90)
  5842. FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b)
  5843. {
  5844. return vreinterpretq_m128i_s8(
  5845. vqsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  5846. }
  5847. // Subtracts the 8 unsigned 16-bit integers of bfrom the 8 unsigned 16-bit
  5848. // integers of a and saturates..
  5849. // https://technet.microsoft.com/en-us/subscriptions/index/f44y0s19(v=vs.90).aspx
  5850. FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b)
  5851. {
  5852. return vreinterpretq_m128i_u16(
  5853. vqsubq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
  5854. }
  5855. // Subtracts the 16 unsigned 8-bit integers of b from the 16 unsigned 8-bit
  5856. // integers of a and saturates.
  5857. //
  5858. // r0 := UnsignedSaturate(a0 - b0)
  5859. // r1 := UnsignedSaturate(a1 - b1)
  5860. // ...
  5861. // r15 := UnsignedSaturate(a15 - b15)
  5862. //
  5863. // https://technet.microsoft.com/en-us/subscriptions/yadkxc18(v=vs.90)
  5864. FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b)
  5865. {
  5866. return vreinterpretq_m128i_u8(
  5867. vqsubq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  5868. }
  5869. #define _mm_ucomieq_sd _mm_comieq_sd
  5870. #define _mm_ucomige_sd _mm_comige_sd
  5871. #define _mm_ucomigt_sd _mm_comigt_sd
  5872. #define _mm_ucomile_sd _mm_comile_sd
  5873. #define _mm_ucomilt_sd _mm_comilt_sd
  5874. #define _mm_ucomineq_sd _mm_comineq_sd
  5875. // Return vector of type __m128d with undefined elements.
  5876. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_pd
  5877. FORCE_INLINE __m128d _mm_undefined_pd(void)
  5878. {
  5879. #if defined(__GNUC__) || defined(__clang__)
  5880. #pragma GCC diagnostic push
  5881. #pragma GCC diagnostic ignored "-Wuninitialized"
  5882. #endif
  5883. __m128d a;
  5884. return a;
  5885. #if defined(__GNUC__) || defined(__clang__)
  5886. #pragma GCC diagnostic pop
  5887. #endif
  5888. }
  5889. // Interleaves the upper 4 signed or unsigned 16-bit integers in a with the
  5890. // upper 4 signed or unsigned 16-bit integers in b.
  5891. //
  5892. // r0 := a4
  5893. // r1 := b4
  5894. // r2 := a5
  5895. // r3 := b5
  5896. // r4 := a6
  5897. // r5 := b6
  5898. // r6 := a7
  5899. // r7 := b7
  5900. //
  5901. // https://msdn.microsoft.com/en-us/library/03196cz7(v=vs.100).aspx
  5902. FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
  5903. {
  5904. #if defined(__aarch64__)
  5905. return vreinterpretq_m128i_s16(
  5906. vzip2q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  5907. #else
  5908. int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a));
  5909. int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b));
  5910. int16x4x2_t result = vzip_s16(a1, b1);
  5911. return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
  5912. #endif
  5913. }
  5914. // Interleaves the upper 2 signed or unsigned 32-bit integers in a with the
  5915. // upper 2 signed or unsigned 32-bit integers in b.
  5916. // https://msdn.microsoft.com/en-us/library/65sa7cbs(v=vs.100).aspx
  5917. FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
  5918. {
  5919. #if defined(__aarch64__)
  5920. return vreinterpretq_m128i_s32(
  5921. vzip2q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  5922. #else
  5923. int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a));
  5924. int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b));
  5925. int32x2x2_t result = vzip_s32(a1, b1);
  5926. return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
  5927. #endif
  5928. }
  5929. // Interleaves the upper signed or unsigned 64-bit integer in a with the
  5930. // upper signed or unsigned 64-bit integer in b.
  5931. //
  5932. // r0 := a1
  5933. // r1 := b1
  5934. FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b)
  5935. {
  5936. int64x1_t a_h = vget_high_s64(vreinterpretq_s64_m128i(a));
  5937. int64x1_t b_h = vget_high_s64(vreinterpretq_s64_m128i(b));
  5938. return vreinterpretq_m128i_s64(vcombine_s64(a_h, b_h));
  5939. }
  5940. // Interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper
  5941. // 8 signed or unsigned 8-bit integers in b.
  5942. //
  5943. // r0 := a8
  5944. // r1 := b8
  5945. // r2 := a9
  5946. // r3 := b9
  5947. // ...
  5948. // r14 := a15
  5949. // r15 := b15
  5950. //
  5951. // https://msdn.microsoft.com/en-us/library/t5h7783k(v=vs.100).aspx
  5952. FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
  5953. {
  5954. #if defined(__aarch64__)
  5955. return vreinterpretq_m128i_s8(
  5956. vzip2q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  5957. #else
  5958. int8x8_t a1 =
  5959. vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a)));
  5960. int8x8_t b1 =
  5961. vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b)));
  5962. int8x8x2_t result = vzip_s8(a1, b1);
  5963. return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
  5964. #endif
  5965. }
  5966. // Unpack and interleave double-precision (64-bit) floating-point elements from
  5967. // the high half of a and b, and store the results in dst.
  5968. //
  5969. // DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
  5970. // dst[63:0] := src1[127:64]
  5971. // dst[127:64] := src2[127:64]
  5972. // RETURN dst[127:0]
  5973. // }
  5974. // dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
  5975. //
  5976. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_pd
  5977. FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b)
  5978. {
  5979. #if defined(__aarch64__)
  5980. return vreinterpretq_m128d_f64(
  5981. vzip2q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  5982. #else
  5983. return vreinterpretq_m128d_s64(
  5984. vcombine_s64(vget_high_s64(vreinterpretq_s64_m128d(a)),
  5985. vget_high_s64(vreinterpretq_s64_m128d(b))));
  5986. #endif
  5987. }
  5988. // Interleaves the lower 4 signed or unsigned 16-bit integers in a with the
  5989. // lower 4 signed or unsigned 16-bit integers in b.
  5990. //
  5991. // r0 := a0
  5992. // r1 := b0
  5993. // r2 := a1
  5994. // r3 := b1
  5995. // r4 := a2
  5996. // r5 := b2
  5997. // r6 := a3
  5998. // r7 := b3
  5999. //
  6000. // https://msdn.microsoft.com/en-us/library/btxb17bw%28v=vs.90%29.aspx
  6001. FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
  6002. {
  6003. #if defined(__aarch64__)
  6004. return vreinterpretq_m128i_s16(
  6005. vzip1q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
  6006. #else
  6007. int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a));
  6008. int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b));
  6009. int16x4x2_t result = vzip_s16(a1, b1);
  6010. return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
  6011. #endif
  6012. }
  6013. // Interleaves the lower 2 signed or unsigned 32 - bit integers in a with the
  6014. // lower 2 signed or unsigned 32 - bit integers in b.
  6015. //
  6016. // r0 := a0
  6017. // r1 := b0
  6018. // r2 := a1
  6019. // r3 := b1
  6020. //
  6021. // https://msdn.microsoft.com/en-us/library/x8atst9d(v=vs.100).aspx
  6022. FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
  6023. {
  6024. #if defined(__aarch64__)
  6025. return vreinterpretq_m128i_s32(
  6026. vzip1q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  6027. #else
  6028. int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a));
  6029. int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b));
  6030. int32x2x2_t result = vzip_s32(a1, b1);
  6031. return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
  6032. #endif
  6033. }
  6034. FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b)
  6035. {
  6036. int64x1_t a_l = vget_low_s64(vreinterpretq_s64_m128i(a));
  6037. int64x1_t b_l = vget_low_s64(vreinterpretq_s64_m128i(b));
  6038. return vreinterpretq_m128i_s64(vcombine_s64(a_l, b_l));
  6039. }
  6040. // Interleaves the lower 8 signed or unsigned 8-bit integers in a with the lower
  6041. // 8 signed or unsigned 8-bit integers in b.
  6042. //
  6043. // r0 := a0
  6044. // r1 := b0
  6045. // r2 := a1
  6046. // r3 := b1
  6047. // ...
  6048. // r14 := a7
  6049. // r15 := b7
  6050. //
  6051. // https://msdn.microsoft.com/en-us/library/xf7k860c%28v=vs.90%29.aspx
  6052. FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
  6053. {
  6054. #if defined(__aarch64__)
  6055. return vreinterpretq_m128i_s8(
  6056. vzip1q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  6057. #else
  6058. int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a)));
  6059. int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b)));
  6060. int8x8x2_t result = vzip_s8(a1, b1);
  6061. return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
  6062. #endif
  6063. }
  6064. // Unpack and interleave double-precision (64-bit) floating-point elements from
  6065. // the low half of a and b, and store the results in dst.
  6066. //
  6067. // DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
  6068. // dst[63:0] := src1[63:0]
  6069. // dst[127:64] := src2[63:0]
  6070. // RETURN dst[127:0]
  6071. // }
  6072. // dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
  6073. //
  6074. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_pd
  6075. FORCE_INLINE __m128d _mm_unpacklo_pd(__m128d a, __m128d b)
  6076. {
  6077. #if defined(__aarch64__)
  6078. return vreinterpretq_m128d_f64(
  6079. vzip1q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  6080. #else
  6081. return vreinterpretq_m128d_s64(
  6082. vcombine_s64(vget_low_s64(vreinterpretq_s64_m128d(a)),
  6083. vget_low_s64(vreinterpretq_s64_m128d(b))));
  6084. #endif
  6085. }
  6086. // Compute the bitwise XOR of packed double-precision (64-bit) floating-point
  6087. // elements in a and b, and store the results in dst.
  6088. //
  6089. // FOR j := 0 to 1
  6090. // i := j*64
  6091. // dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
  6092. // ENDFOR
  6093. //
  6094. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_pd
  6095. FORCE_INLINE __m128d _mm_xor_pd(__m128d a, __m128d b)
  6096. {
  6097. return vreinterpretq_m128d_s64(
  6098. veorq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
  6099. }
  6100. // Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in
  6101. // b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx
  6102. FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
  6103. {
  6104. return vreinterpretq_m128i_s32(
  6105. veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  6106. }
  6107. /* SSE3 */
  6108. // Alternatively add and subtract packed double-precision (64-bit)
  6109. // floating-point elements in a to/from packed elements in b, and store the
  6110. // results in dst.
  6111. //
  6112. // FOR j := 0 to 1
  6113. // i := j*64
  6114. // IF ((j & 1) == 0)
  6115. // dst[i+63:i] := a[i+63:i] - b[i+63:i]
  6116. // ELSE
  6117. // dst[i+63:i] := a[i+63:i] + b[i+63:i]
  6118. // FI
  6119. // ENDFOR
  6120. //
  6121. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_addsub_pd
  6122. FORCE_INLINE __m128d _mm_addsub_pd(__m128d a, __m128d b)
  6123. {
  6124. _sse2neon_const __m128d mask = _mm_set_pd(1.0f, -1.0f);
  6125. #if defined(__aarch64__)
  6126. return vreinterpretq_m128d_f64(vfmaq_f64(vreinterpretq_f64_m128d(a),
  6127. vreinterpretq_f64_m128d(b),
  6128. vreinterpretq_f64_m128d(mask)));
  6129. #else
  6130. return _mm_add_pd(_mm_mul_pd(b, mask), a);
  6131. #endif
  6132. }
  6133. // Alternatively add and subtract packed single-precision (32-bit)
  6134. // floating-point elements in a to/from packed elements in b, and store the
  6135. // results in dst.
  6136. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=addsub_ps
  6137. FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b)
  6138. {
  6139. _sse2neon_const __m128 mask = _mm_setr_ps(-1.0f, 1.0f, -1.0f, 1.0f);
  6140. #if defined(__aarch64__) || defined(__ARM_FEATURE_FMA) /* VFPv4+ */
  6141. return vreinterpretq_m128_f32(vfmaq_f32(vreinterpretq_f32_m128(a),
  6142. vreinterpretq_f32_m128(mask),
  6143. vreinterpretq_f32_m128(b)));
  6144. #else
  6145. return _mm_add_ps(_mm_mul_ps(b, mask), a);
  6146. #endif
  6147. }
  6148. // Horizontally add adjacent pairs of double-precision (64-bit) floating-point
  6149. // elements in a and b, and pack the results in dst.
  6150. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pd
  6151. FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b)
  6152. {
  6153. #if defined(__aarch64__)
  6154. return vreinterpretq_m128d_f64(
  6155. vpaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
  6156. #else
  6157. double *da = (double *) &a;
  6158. double *db = (double *) &b;
  6159. double c[] = {da[0] + da[1], db[0] + db[1]};
  6160. return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
  6161. #endif
  6162. }
  6163. // Computes pairwise add of each argument as single-precision, floating-point
  6164. // values a and b.
  6165. // https://msdn.microsoft.com/en-us/library/yd9wecaa.aspx
  6166. FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b)
  6167. {
  6168. #if defined(__aarch64__)
  6169. return vreinterpretq_m128_f32(
  6170. vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
  6171. #else
  6172. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
  6173. float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
  6174. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
  6175. float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
  6176. return vreinterpretq_m128_f32(
  6177. vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)));
  6178. #endif
  6179. }
  6180. // Horizontally subtract adjacent pairs of double-precision (64-bit)
  6181. // floating-point elements in a and b, and pack the results in dst.
  6182. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pd
  6183. FORCE_INLINE __m128d _mm_hsub_pd(__m128d _a, __m128d _b)
  6184. {
  6185. #if defined(__aarch64__)
  6186. float64x2_t a = vreinterpretq_f64_m128d(_a);
  6187. float64x2_t b = vreinterpretq_f64_m128d(_b);
  6188. return vreinterpretq_m128d_f64(
  6189. vsubq_f64(vuzp1q_f64(a, b), vuzp2q_f64(a, b)));
  6190. #else
  6191. double *da = (double *) &_a;
  6192. double *db = (double *) &_b;
  6193. double c[] = {da[0] - da[1], db[0] - db[1]};
  6194. return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
  6195. #endif
  6196. }
  6197. // Horizontally subtract adjacent pairs of single-precision (32-bit)
  6198. // floating-point elements in a and b, and pack the results in dst.
  6199. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_ps
  6200. FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b)
  6201. {
  6202. float32x4_t a = vreinterpretq_f32_m128(_a);
  6203. float32x4_t b = vreinterpretq_f32_m128(_b);
  6204. #if defined(__aarch64__)
  6205. return vreinterpretq_m128_f32(
  6206. vsubq_f32(vuzp1q_f32(a, b), vuzp2q_f32(a, b)));
  6207. #else
  6208. float32x4x2_t c = vuzpq_f32(a, b);
  6209. return vreinterpretq_m128_f32(vsubq_f32(c.val[0], c.val[1]));
  6210. #endif
  6211. }
  6212. // Load 128-bits of integer data from unaligned memory into dst. This intrinsic
  6213. // may perform better than _mm_loadu_si128 when the data crosses a cache line
  6214. // boundary.
  6215. //
  6216. // dst[127:0] := MEM[mem_addr+127:mem_addr]
  6217. //
  6218. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lddqu_si128
  6219. #define _mm_lddqu_si128 _mm_loadu_si128
  6220. // Load a double-precision (64-bit) floating-point element from memory into both
  6221. // elements of dst.
  6222. //
  6223. // dst[63:0] := MEM[mem_addr+63:mem_addr]
  6224. // dst[127:64] := MEM[mem_addr+63:mem_addr]
  6225. //
  6226. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loaddup_pd
  6227. #define _mm_loaddup_pd _mm_load1_pd
  6228. // Duplicate the low double-precision (64-bit) floating-point element from a,
  6229. // and store the results in dst.
  6230. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movedup_pd
  6231. FORCE_INLINE __m128d _mm_movedup_pd(__m128d a)
  6232. {
  6233. #if defined(__aarch64__)
  6234. return vreinterpretq_m128d_f64(
  6235. vdupq_laneq_f64(vreinterpretq_f64_m128d(a), 0));
  6236. #else
  6237. return vreinterpretq_m128d_u64(
  6238. vdupq_n_u64(vgetq_lane_u64(vreinterpretq_u64_m128d(a), 0)));
  6239. #endif
  6240. }
  6241. // Duplicate odd-indexed single-precision (32-bit) floating-point elements
  6242. // from a, and store the results in dst.
  6243. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehdup_ps
  6244. FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a)
  6245. {
  6246. #if defined(__aarch64__)
  6247. return vreinterpretq_m128_f32(
  6248. vtrn2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)));
  6249. #elif defined(_sse2neon_shuffle)
  6250. return vreinterpretq_m128_f32(vshuffleq_s32(
  6251. vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 1, 1, 3, 3));
  6252. #else
  6253. float32_t a1 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
  6254. float32_t a3 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 3);
  6255. float ALIGN_STRUCT(16) data[4] = {a1, a1, a3, a3};
  6256. return vreinterpretq_m128_f32(vld1q_f32(data));
  6257. #endif
  6258. }
  6259. // Duplicate even-indexed single-precision (32-bit) floating-point elements
  6260. // from a, and store the results in dst.
  6261. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_moveldup_ps
  6262. FORCE_INLINE __m128 _mm_moveldup_ps(__m128 a)
  6263. {
  6264. #if defined(__aarch64__)
  6265. return vreinterpretq_m128_f32(
  6266. vtrn1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)));
  6267. #elif defined(_sse2neon_shuffle)
  6268. return vreinterpretq_m128_f32(vshuffleq_s32(
  6269. vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 0, 0, 2, 2));
  6270. #else
  6271. float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
  6272. float32_t a2 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 2);
  6273. float ALIGN_STRUCT(16) data[4] = {a0, a0, a2, a2};
  6274. return vreinterpretq_m128_f32(vld1q_f32(data));
  6275. #endif
  6276. }
  6277. /* SSSE3 */
  6278. // Compute the absolute value of packed signed 16-bit integers in a, and store
  6279. // the unsigned results in dst.
  6280. //
  6281. // FOR j := 0 to 7
  6282. // i := j*16
  6283. // dst[i+15:i] := ABS(a[i+15:i])
  6284. // ENDFOR
  6285. //
  6286. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi16
  6287. FORCE_INLINE __m128i _mm_abs_epi16(__m128i a)
  6288. {
  6289. return vreinterpretq_m128i_s16(vabsq_s16(vreinterpretq_s16_m128i(a)));
  6290. }
  6291. // Compute the absolute value of packed signed 32-bit integers in a, and store
  6292. // the unsigned results in dst.
  6293. //
  6294. // FOR j := 0 to 3
  6295. // i := j*32
  6296. // dst[i+31:i] := ABS(a[i+31:i])
  6297. // ENDFOR
  6298. //
  6299. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi32
  6300. FORCE_INLINE __m128i _mm_abs_epi32(__m128i a)
  6301. {
  6302. return vreinterpretq_m128i_s32(vabsq_s32(vreinterpretq_s32_m128i(a)));
  6303. }
  6304. // Compute the absolute value of packed signed 8-bit integers in a, and store
  6305. // the unsigned results in dst.
  6306. //
  6307. // FOR j := 0 to 15
  6308. // i := j*8
  6309. // dst[i+7:i] := ABS(a[i+7:i])
  6310. // ENDFOR
  6311. //
  6312. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi8
  6313. FORCE_INLINE __m128i _mm_abs_epi8(__m128i a)
  6314. {
  6315. return vreinterpretq_m128i_s8(vabsq_s8(vreinterpretq_s8_m128i(a)));
  6316. }
  6317. // Compute the absolute value of packed signed 16-bit integers in a, and store
  6318. // the unsigned results in dst.
  6319. //
  6320. // FOR j := 0 to 3
  6321. // i := j*16
  6322. // dst[i+15:i] := ABS(a[i+15:i])
  6323. // ENDFOR
  6324. //
  6325. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi16
  6326. FORCE_INLINE __m64 _mm_abs_pi16(__m64 a)
  6327. {
  6328. return vreinterpret_m64_s16(vabs_s16(vreinterpret_s16_m64(a)));
  6329. }
  6330. // Compute the absolute value of packed signed 32-bit integers in a, and store
  6331. // the unsigned results in dst.
  6332. //
  6333. // FOR j := 0 to 1
  6334. // i := j*32
  6335. // dst[i+31:i] := ABS(a[i+31:i])
  6336. // ENDFOR
  6337. //
  6338. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi32
  6339. FORCE_INLINE __m64 _mm_abs_pi32(__m64 a)
  6340. {
  6341. return vreinterpret_m64_s32(vabs_s32(vreinterpret_s32_m64(a)));
  6342. }
  6343. // Compute the absolute value of packed signed 8-bit integers in a, and store
  6344. // the unsigned results in dst.
  6345. //
  6346. // FOR j := 0 to 7
  6347. // i := j*8
  6348. // dst[i+7:i] := ABS(a[i+7:i])
  6349. // ENDFOR
  6350. //
  6351. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi8
  6352. FORCE_INLINE __m64 _mm_abs_pi8(__m64 a)
  6353. {
  6354. return vreinterpret_m64_s8(vabs_s8(vreinterpret_s8_m64(a)));
  6355. }
  6356. // Concatenate 16-byte blocks in a and b into a 32-byte temporary result, shift
  6357. // the result right by imm8 bytes, and store the low 16 bytes in dst.
  6358. //
  6359. // tmp[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8)
  6360. // dst[127:0] := tmp[127:0]
  6361. //
  6362. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi8
  6363. #define _mm_alignr_epi8(a, b, imm) \
  6364. __extension__({ \
  6365. uint8x16_t _a = vreinterpretq_u8_m128i(a); \
  6366. uint8x16_t _b = vreinterpretq_u8_m128i(b); \
  6367. __m128i ret; \
  6368. if (_sse2neon_unlikely((imm) & ~31)) \
  6369. ret = vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
  6370. else if (imm >= 16) \
  6371. ret = _mm_srli_si128(a, imm >= 16 ? imm - 16 : 0); \
  6372. else \
  6373. ret = \
  6374. vreinterpretq_m128i_u8(vextq_u8(_b, _a, imm < 16 ? imm : 0)); \
  6375. ret; \
  6376. })
  6377. // Concatenate 8-byte blocks in a and b into a 16-byte temporary result, shift
  6378. // the result right by imm8 bytes, and store the low 8 bytes in dst.
  6379. //
  6380. // tmp[127:0] := ((a[63:0] << 64)[127:0] OR b[63:0]) >> (imm8*8)
  6381. // dst[63:0] := tmp[63:0]
  6382. //
  6383. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_pi8
  6384. #define _mm_alignr_pi8(a, b, imm) \
  6385. __extension__({ \
  6386. __m64 ret; \
  6387. if (_sse2neon_unlikely((imm) >= 16)) { \
  6388. ret = vreinterpret_m64_s8(vdup_n_s8(0)); \
  6389. } else { \
  6390. uint8x8_t tmp_low, tmp_high; \
  6391. if ((imm) >= 8) { \
  6392. const int idx = (imm) -8; \
  6393. tmp_low = vreinterpret_u8_m64(a); \
  6394. tmp_high = vdup_n_u8(0); \
  6395. ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
  6396. } else { \
  6397. const int idx = (imm); \
  6398. tmp_low = vreinterpret_u8_m64(b); \
  6399. tmp_high = vreinterpret_u8_m64(a); \
  6400. ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
  6401. } \
  6402. } \
  6403. ret; \
  6404. })
  6405. // Computes pairwise add of each argument as a 16-bit signed or unsigned integer
  6406. // values a and b.
  6407. FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b)
  6408. {
  6409. int16x8_t a = vreinterpretq_s16_m128i(_a);
  6410. int16x8_t b = vreinterpretq_s16_m128i(_b);
  6411. #if defined(__aarch64__)
  6412. return vreinterpretq_m128i_s16(vpaddq_s16(a, b));
  6413. #else
  6414. return vreinterpretq_m128i_s16(
  6415. vcombine_s16(vpadd_s16(vget_low_s16(a), vget_high_s16(a)),
  6416. vpadd_s16(vget_low_s16(b), vget_high_s16(b))));
  6417. #endif
  6418. }
  6419. // Computes pairwise add of each argument as a 32-bit signed or unsigned integer
  6420. // values a and b.
  6421. FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b)
  6422. {
  6423. int32x4_t a = vreinterpretq_s32_m128i(_a);
  6424. int32x4_t b = vreinterpretq_s32_m128i(_b);
  6425. #if defined(__aarch64__)
  6426. return vreinterpretq_m128i_s32(vpaddq_s32(a, b));
  6427. #else
  6428. return vreinterpretq_m128i_s32(
  6429. vcombine_s32(vpadd_s32(vget_low_s32(a), vget_high_s32(a)),
  6430. vpadd_s32(vget_low_s32(b), vget_high_s32(b))));
  6431. #endif
  6432. }
  6433. // Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the
  6434. // signed 16-bit results in dst.
  6435. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pi16
  6436. FORCE_INLINE __m64 _mm_hadd_pi16(__m64 a, __m64 b)
  6437. {
  6438. return vreinterpret_m64_s16(
  6439. vpadd_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
  6440. }
  6441. // Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the
  6442. // signed 32-bit results in dst.
  6443. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pi32
  6444. FORCE_INLINE __m64 _mm_hadd_pi32(__m64 a, __m64 b)
  6445. {
  6446. return vreinterpret_m64_s32(
  6447. vpadd_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b)));
  6448. }
  6449. // Computes saturated pairwise sub of each argument as a 16-bit signed
  6450. // integer values a and b.
  6451. FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b)
  6452. {
  6453. #if defined(__aarch64__)
  6454. int16x8_t a = vreinterpretq_s16_m128i(_a);
  6455. int16x8_t b = vreinterpretq_s16_m128i(_b);
  6456. return vreinterpretq_s64_s16(
  6457. vqaddq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
  6458. #else
  6459. int32x4_t a = vreinterpretq_s32_m128i(_a);
  6460. int32x4_t b = vreinterpretq_s32_m128i(_b);
  6461. // Interleave using vshrn/vmovn
  6462. // [a0|a2|a4|a6|b0|b2|b4|b6]
  6463. // [a1|a3|a5|a7|b1|b3|b5|b7]
  6464. int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
  6465. int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
  6466. // Saturated add
  6467. return vreinterpretq_m128i_s16(vqaddq_s16(ab0246, ab1357));
  6468. #endif
  6469. }
  6470. // Horizontally add adjacent pairs of signed 16-bit integers in a and b using
  6471. // saturation, and pack the signed 16-bit results in dst.
  6472. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadds_pi16
  6473. FORCE_INLINE __m64 _mm_hadds_pi16(__m64 _a, __m64 _b)
  6474. {
  6475. int16x4_t a = vreinterpret_s16_m64(_a);
  6476. int16x4_t b = vreinterpret_s16_m64(_b);
  6477. #if defined(__aarch64__)
  6478. return vreinterpret_s64_s16(vqadd_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
  6479. #else
  6480. int16x4x2_t res = vuzp_s16(a, b);
  6481. return vreinterpret_s64_s16(vqadd_s16(res.val[0], res.val[1]));
  6482. #endif
  6483. }
  6484. // Horizontally subtract adjacent pairs of 16-bit integers in a and b, and pack
  6485. // the signed 16-bit results in dst.
  6486. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi16
  6487. FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b)
  6488. {
  6489. int16x8_t a = vreinterpretq_s16_m128i(_a);
  6490. int16x8_t b = vreinterpretq_s16_m128i(_b);
  6491. #if defined(__aarch64__)
  6492. return vreinterpretq_m128i_s16(
  6493. vsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
  6494. #else
  6495. int16x8x2_t c = vuzpq_s16(a, b);
  6496. return vreinterpretq_m128i_s16(vsubq_s16(c.val[0], c.val[1]));
  6497. #endif
  6498. }
  6499. // Horizontally subtract adjacent pairs of 32-bit integers in a and b, and pack
  6500. // the signed 32-bit results in dst.
  6501. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi32
  6502. FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b)
  6503. {
  6504. int32x4_t a = vreinterpretq_s32_m128i(_a);
  6505. int32x4_t b = vreinterpretq_s32_m128i(_b);
  6506. #if defined(__aarch64__)
  6507. return vreinterpretq_m128i_s32(
  6508. vsubq_s32(vuzp1q_s32(a, b), vuzp2q_s32(a, b)));
  6509. #else
  6510. int32x4x2_t c = vuzpq_s32(a, b);
  6511. return vreinterpretq_m128i_s32(vsubq_s32(c.val[0], c.val[1]));
  6512. #endif
  6513. }
  6514. // Horizontally subtract adjacent pairs of 16-bit integers in a and b, and pack
  6515. // the signed 16-bit results in dst.
  6516. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pi16
  6517. FORCE_INLINE __m64 _mm_hsub_pi16(__m64 _a, __m64 _b)
  6518. {
  6519. int16x4_t a = vreinterpret_s16_m64(_a);
  6520. int16x4_t b = vreinterpret_s16_m64(_b);
  6521. #if defined(__aarch64__)
  6522. return vreinterpret_m64_s16(vsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
  6523. #else
  6524. int16x4x2_t c = vuzp_s16(a, b);
  6525. return vreinterpret_m64_s16(vsub_s16(c.val[0], c.val[1]));
  6526. #endif
  6527. }
  6528. // Horizontally subtract adjacent pairs of 32-bit integers in a and b, and pack
  6529. // the signed 32-bit results in dst.
  6530. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_hsub_pi32
  6531. FORCE_INLINE __m64 _mm_hsub_pi32(__m64 _a, __m64 _b)
  6532. {
  6533. int32x2_t a = vreinterpret_s32_m64(_a);
  6534. int32x2_t b = vreinterpret_s32_m64(_b);
  6535. #if defined(__aarch64__)
  6536. return vreinterpret_m64_s32(vsub_s32(vuzp1_s32(a, b), vuzp2_s32(a, b)));
  6537. #else
  6538. int32x2x2_t c = vuzp_s32(a, b);
  6539. return vreinterpret_m64_s32(vsub_s32(c.val[0], c.val[1]));
  6540. #endif
  6541. }
  6542. // Computes saturated pairwise difference of each argument as a 16-bit signed
  6543. // integer values a and b.
  6544. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsubs_epi16
  6545. FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b)
  6546. {
  6547. int16x8_t a = vreinterpretq_s16_m128i(_a);
  6548. int16x8_t b = vreinterpretq_s16_m128i(_b);
  6549. #if defined(__aarch64__)
  6550. return vreinterpretq_m128i_s16(
  6551. vqsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
  6552. #else
  6553. int16x8x2_t c = vuzpq_s16(a, b);
  6554. return vreinterpretq_m128i_s16(vqsubq_s16(c.val[0], c.val[1]));
  6555. #endif
  6556. }
  6557. // Horizontally subtract adjacent pairs of signed 16-bit integers in a and b
  6558. // using saturation, and pack the signed 16-bit results in dst.
  6559. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsubs_pi16
  6560. FORCE_INLINE __m64 _mm_hsubs_pi16(__m64 _a, __m64 _b)
  6561. {
  6562. int16x4_t a = vreinterpret_s16_m64(_a);
  6563. int16x4_t b = vreinterpret_s16_m64(_b);
  6564. #if defined(__aarch64__)
  6565. return vreinterpret_m64_s16(vqsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
  6566. #else
  6567. int16x4x2_t c = vuzp_s16(a, b);
  6568. return vreinterpret_m64_s16(vqsub_s16(c.val[0], c.val[1]));
  6569. #endif
  6570. }
  6571. // Vertically multiply each unsigned 8-bit integer from a with the corresponding
  6572. // signed 8-bit integer from b, producing intermediate signed 16-bit integers.
  6573. // Horizontally add adjacent pairs of intermediate signed 16-bit integers,
  6574. // and pack the saturated results in dst.
  6575. //
  6576. // FOR j := 0 to 7
  6577. // i := j*16
  6578. // dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] +
  6579. // a[i+7:i]*b[i+7:i] )
  6580. // ENDFOR
  6581. FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b)
  6582. {
  6583. #if defined(__aarch64__)
  6584. uint8x16_t a = vreinterpretq_u8_m128i(_a);
  6585. int8x16_t b = vreinterpretq_s8_m128i(_b);
  6586. int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a))),
  6587. vmovl_s8(vget_low_s8(b)));
  6588. int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(a))),
  6589. vmovl_s8(vget_high_s8(b)));
  6590. return vreinterpretq_m128i_s16(
  6591. vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th)));
  6592. #else
  6593. // This would be much simpler if x86 would choose to zero extend OR sign
  6594. // extend, not both. This could probably be optimized better.
  6595. uint16x8_t a = vreinterpretq_u16_m128i(_a);
  6596. int16x8_t b = vreinterpretq_s16_m128i(_b);
  6597. // Zero extend a
  6598. int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a, 8));
  6599. int16x8_t a_even = vreinterpretq_s16_u16(vbicq_u16(a, vdupq_n_u16(0xff00)));
  6600. // Sign extend by shifting left then shifting right.
  6601. int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b, 8), 8);
  6602. int16x8_t b_odd = vshrq_n_s16(b, 8);
  6603. // multiply
  6604. int16x8_t prod1 = vmulq_s16(a_even, b_even);
  6605. int16x8_t prod2 = vmulq_s16(a_odd, b_odd);
  6606. // saturated add
  6607. return vreinterpretq_m128i_s16(vqaddq_s16(prod1, prod2));
  6608. #endif
  6609. }
  6610. // Vertically multiply each unsigned 8-bit integer from a with the corresponding
  6611. // signed 8-bit integer from b, producing intermediate signed 16-bit integers.
  6612. // Horizontally add adjacent pairs of intermediate signed 16-bit integers, and
  6613. // pack the saturated results in dst.
  6614. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_pi16
  6615. FORCE_INLINE __m64 _mm_maddubs_pi16(__m64 _a, __m64 _b)
  6616. {
  6617. uint16x4_t a = vreinterpret_u16_m64(_a);
  6618. int16x4_t b = vreinterpret_s16_m64(_b);
  6619. // Zero extend a
  6620. int16x4_t a_odd = vreinterpret_s16_u16(vshr_n_u16(a, 8));
  6621. int16x4_t a_even = vreinterpret_s16_u16(vand_u16(a, vdup_n_u16(0xff)));
  6622. // Sign extend by shifting left then shifting right.
  6623. int16x4_t b_even = vshr_n_s16(vshl_n_s16(b, 8), 8);
  6624. int16x4_t b_odd = vshr_n_s16(b, 8);
  6625. // multiply
  6626. int16x4_t prod1 = vmul_s16(a_even, b_even);
  6627. int16x4_t prod2 = vmul_s16(a_odd, b_odd);
  6628. // saturated add
  6629. return vreinterpret_m64_s16(vqadd_s16(prod1, prod2));
  6630. }
  6631. // Multiply packed signed 16-bit integers in a and b, producing intermediate
6632. // signed 32-bit integers. Shift right by 15 bits with rounding (add 0x4000
6632. // before the shift), and store the packed 16-bit integers in dst.
6633. //
  6634. //
  6635. // r0 := Round(((int32_t)a0 * (int32_t)b0) >> 15)
  6636. // r1 := Round(((int32_t)a1 * (int32_t)b1) >> 15)
  6637. // r2 := Round(((int32_t)a2 * (int32_t)b2) >> 15)
  6638. // ...
  6639. // r7 := Round(((int32_t)a7 * (int32_t)b7) >> 15)
  6640. FORCE_INLINE __m128i _mm_mulhrs_epi16(__m128i a, __m128i b)
  6641. {
  6642. // Has issues due to saturation
  6643. // return vreinterpretq_m128i_s16(vqrdmulhq_s16(a, b));
  6644. // Multiply
  6645. int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
  6646. vget_low_s16(vreinterpretq_s16_m128i(b)));
  6647. int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
  6648. vget_high_s16(vreinterpretq_s16_m128i(b)));
  6649. // Rounding narrowing shift right
  6650. // narrow = (int16_t)((mul + 16384) >> 15);
  6651. int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15);
  6652. int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15);
  6653. // Join together
  6654. return vreinterpretq_m128i_s16(vcombine_s16(narrow_lo, narrow_hi));
  6655. }
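// Worked example (illustrative only): treating the 16-bit lanes as Q15
// fixed-point values, 0x4000 (0.5) times 0x2000 (0.25) gives the 32-bit
// product 0x08000000; adding the rounding bias 0x4000 and shifting right by
// 15 yields 0x1000 (0.125), which is exactly what vrshrn_n_s32(mul, 15) does:
//
//   __m128i a = _mm_set1_epi16(0x4000);
//   __m128i b = _mm_set1_epi16(0x2000);
//   __m128i r = _mm_mulhrs_epi16(a, b);   // every lane holds 0x1000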
  6656. // Multiply packed signed 16-bit integers in a and b, producing intermediate
  6657. // signed 32-bit integers. Truncate each intermediate integer to the 18 most
  6658. // significant bits, round by adding 1, and store bits [16:1] to dst.
  6659. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhrs_pi16
  6660. FORCE_INLINE __m64 _mm_mulhrs_pi16(__m64 a, __m64 b)
  6661. {
  6662. int32x4_t mul_extend =
  6663. vmull_s16((vreinterpret_s16_m64(a)), (vreinterpret_s16_m64(b)));
  6664. // Rounding narrowing shift right
  6665. return vreinterpret_m64_s16(vrshrn_n_s32(mul_extend, 15));
  6666. }
  6667. // Shuffle packed 8-bit integers in a according to shuffle control mask in the
  6668. // corresponding 8-bit element of b, and store the results in dst.
  6669. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi8
  6670. FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b)
  6671. {
  6672. int8x16_t tbl = vreinterpretq_s8_m128i(a); // input a
  6673. uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b
  6674. uint8x16_t idx_masked =
  6675. vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits
  6676. #if defined(__aarch64__)
  6677. return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked));
  6678. #elif defined(__GNUC__)
  6679. int8x16_t ret;
  6680. // %e and %f represent the even and odd D registers
  6681. // respectively.
  6682. __asm__ __volatile__(
  6683. "vtbl.8 %e[ret], {%e[tbl], %f[tbl]}, %e[idx]\n"
  6684. "vtbl.8 %f[ret], {%e[tbl], %f[tbl]}, %f[idx]\n"
  6685. : [ret] "=&w"(ret)
  6686. : [tbl] "w"(tbl), [idx] "w"(idx_masked));
  6687. return vreinterpretq_m128i_s8(ret);
  6688. #else
6689. // Generic NEON fallback (also usable when testing this path on AArch64)
  6690. int8x8x2_t a_split = {vget_low_s8(tbl), vget_high_s8(tbl)};
  6691. return vreinterpretq_m128i_s8(
  6692. vcombine_s8(vtbl2_s8(a_split, vget_low_u8(idx_masked)),
  6693. vtbl2_s8(a_split, vget_high_u8(idx_masked))));
  6694. #endif
  6695. }
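// Usage sketch (illustrative only; the helper name is hypothetical): reverse
// the byte order of a vector. Indices with the high bit set would instead
// zero the corresponding destination byte.
//
//   static inline __m128i reverse_bytes_example(__m128i v)
//   {
//       const __m128i rev = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8,
//                                         7, 6, 5, 4, 3, 2, 1, 0);
//       return _mm_shuffle_epi8(v, rev);
//   }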
  6696. // Shuffle packed 8-bit integers in a according to shuffle control mask in the
  6697. // corresponding 8-bit element of b, and store the results in dst.
  6698. //
  6699. // FOR j := 0 to 7
  6700. // i := j*8
  6701. // IF b[i+7] == 1
  6702. // dst[i+7:i] := 0
  6703. // ELSE
  6704. // index[2:0] := b[i+2:i]
  6705. // dst[i+7:i] := a[index*8+7:index*8]
  6706. // FI
  6707. // ENDFOR
  6708. //
  6709. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pi8
  6710. FORCE_INLINE __m64 _mm_shuffle_pi8(__m64 a, __m64 b)
  6711. {
  6712. const int8x8_t controlMask =
  6713. vand_s8(vreinterpret_s8_m64(b), vdup_n_s8((int8_t) (0x1 << 7 | 0x07)));
  6714. int8x8_t res = vtbl1_s8(vreinterpret_s8_m64(a), controlMask);
  6715. return vreinterpret_m64_s8(res);
  6716. }
  6717. // Negate packed 16-bit integers in a when the corresponding signed
  6718. // 16-bit integer in b is negative, and store the results in dst.
6719. // Elements in dst are zeroed out when the corresponding element
  6720. // in b is zero.
  6721. //
  6722. // for i in 0..7
  6723. // if b[i] < 0
  6724. // r[i] := -a[i]
  6725. // else if b[i] == 0
  6726. // r[i] := 0
  6727. // else
  6728. // r[i] := a[i]
  6729. // fi
  6730. // done
  6731. FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b)
  6732. {
  6733. int16x8_t a = vreinterpretq_s16_m128i(_a);
  6734. int16x8_t b = vreinterpretq_s16_m128i(_b);
  6735. // signed shift right: faster than vclt
  6736. // (b < 0) ? 0xFFFF : 0
  6737. uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15));
  6738. // (b == 0) ? 0xFFFF : 0
  6739. #if defined(__aarch64__)
  6740. int16x8_t zeroMask = vreinterpretq_s16_u16(vceqzq_s16(b));
  6741. #else
  6742. int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, vdupq_n_s16(0)));
  6743. #endif
6744. // bitwise select either 'a' or its negation (vnegq_s16(a) produces the
6745. // negation of 'a') based on ltMask
  6746. int16x8_t masked = vbslq_s16(ltMask, vnegq_s16(a), a);
  6747. // res = masked & (~zeroMask)
  6748. int16x8_t res = vbicq_s16(masked, zeroMask);
  6749. return vreinterpretq_m128i_s16(res);
  6750. }
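// Worked example (illustrative only): with a = {1, 2, 3, 4, 5, 6, 7, 8} and
// b = {-1, 0, 2, -2, 0, 1, -3, 3}, the result is {-1, 0, 3, -4, 0, 6, -7, 8}:
// lanes where b is negative are negated, lanes where b is zero are cleared,
// and the remaining lanes pass through unchanged.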
  6751. // Negate packed 32-bit integers in a when the corresponding signed
  6752. // 32-bit integer in b is negative, and store the results in dst.
6753. // Elements in dst are zeroed out when the corresponding element
  6754. // in b is zero.
  6755. //
  6756. // for i in 0..3
  6757. // if b[i] < 0
  6758. // r[i] := -a[i]
  6759. // else if b[i] == 0
  6760. // r[i] := 0
  6761. // else
  6762. // r[i] := a[i]
  6763. // fi
  6764. // done
  6765. FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b)
  6766. {
  6767. int32x4_t a = vreinterpretq_s32_m128i(_a);
  6768. int32x4_t b = vreinterpretq_s32_m128i(_b);
  6769. // signed shift right: faster than vclt
  6770. // (b < 0) ? 0xFFFFFFFF : 0
  6771. uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31));
  6772. // (b == 0) ? 0xFFFFFFFF : 0
  6773. #if defined(__aarch64__)
  6774. int32x4_t zeroMask = vreinterpretq_s32_u32(vceqzq_s32(b));
  6775. #else
  6776. int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, vdupq_n_s32(0)));
  6777. #endif
6778. // bitwise select either 'a' or its negation (vnegq_s32(a) produces the
6779. // negation of 'a') based on ltMask
  6780. int32x4_t masked = vbslq_s32(ltMask, vnegq_s32(a), a);
  6781. // res = masked & (~zeroMask)
  6782. int32x4_t res = vbicq_s32(masked, zeroMask);
  6783. return vreinterpretq_m128i_s32(res);
  6784. }
  6785. // Negate packed 8-bit integers in a when the corresponding signed
  6786. // 8-bit integer in b is negative, and store the results in dst.
6787. // Elements in dst are zeroed out when the corresponding element
  6788. // in b is zero.
  6789. //
  6790. // for i in 0..15
  6791. // if b[i] < 0
  6792. // r[i] := -a[i]
  6793. // else if b[i] == 0
  6794. // r[i] := 0
  6795. // else
  6796. // r[i] := a[i]
  6797. // fi
  6798. // done
  6799. FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b)
  6800. {
  6801. int8x16_t a = vreinterpretq_s8_m128i(_a);
  6802. int8x16_t b = vreinterpretq_s8_m128i(_b);
  6803. // signed shift right: faster than vclt
  6804. // (b < 0) ? 0xFF : 0
  6805. uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7));
  6806. // (b == 0) ? 0xFF : 0
  6807. #if defined(__aarch64__)
  6808. int8x16_t zeroMask = vreinterpretq_s8_u8(vceqzq_s8(b));
  6809. #else
  6810. int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, vdupq_n_s8(0)));
  6811. #endif
6812. // bitwise select either 'a' or its negation (vnegq_s8(a) produces the
6813. // negation of 'a') based on ltMask
  6814. int8x16_t masked = vbslq_s8(ltMask, vnegq_s8(a), a);
  6815. // res = masked & (~zeroMask)
  6816. int8x16_t res = vbicq_s8(masked, zeroMask);
  6817. return vreinterpretq_m128i_s8(res);
  6818. }
  6819. // Negate packed 16-bit integers in a when the corresponding signed 16-bit
6820. // integer in b is negative, and store the results in dst. Elements in dst are
  6821. // zeroed out when the corresponding element in b is zero.
  6822. //
  6823. // FOR j := 0 to 3
  6824. // i := j*16
  6825. // IF b[i+15:i] < 0
  6826. // dst[i+15:i] := -(a[i+15:i])
  6827. // ELSE IF b[i+15:i] == 0
  6828. // dst[i+15:i] := 0
  6829. // ELSE
  6830. // dst[i+15:i] := a[i+15:i]
  6831. // FI
  6832. // ENDFOR
  6833. //
  6834. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi16
  6835. FORCE_INLINE __m64 _mm_sign_pi16(__m64 _a, __m64 _b)
  6836. {
  6837. int16x4_t a = vreinterpret_s16_m64(_a);
  6838. int16x4_t b = vreinterpret_s16_m64(_b);
  6839. // signed shift right: faster than vclt
  6840. // (b < 0) ? 0xFFFF : 0
  6841. uint16x4_t ltMask = vreinterpret_u16_s16(vshr_n_s16(b, 15));
  6842. // (b == 0) ? 0xFFFF : 0
  6843. #if defined(__aarch64__)
  6844. int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b));
  6845. #else
  6846. int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0)));
  6847. #endif
6848. // bitwise select either 'a' or its negation (vneg_s16(a) produces the
6849. // negation of 'a') based on ltMask
  6850. int16x4_t masked = vbsl_s16(ltMask, vneg_s16(a), a);
  6851. // res = masked & (~zeroMask)
  6852. int16x4_t res = vbic_s16(masked, zeroMask);
  6853. return vreinterpret_m64_s16(res);
  6854. }
  6855. // Negate packed 32-bit integers in a when the corresponding signed 32-bit
6856. // integer in b is negative, and store the results in dst. Elements in dst are
  6857. // zeroed out when the corresponding element in b is zero.
  6858. //
  6859. // FOR j := 0 to 1
  6860. // i := j*32
  6861. // IF b[i+31:i] < 0
  6862. // dst[i+31:i] := -(a[i+31:i])
  6863. // ELSE IF b[i+31:i] == 0
  6864. // dst[i+31:i] := 0
  6865. // ELSE
  6866. // dst[i+31:i] := a[i+31:i]
  6867. // FI
  6868. // ENDFOR
  6869. //
  6870. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi32
  6871. FORCE_INLINE __m64 _mm_sign_pi32(__m64 _a, __m64 _b)
  6872. {
  6873. int32x2_t a = vreinterpret_s32_m64(_a);
  6874. int32x2_t b = vreinterpret_s32_m64(_b);
  6875. // signed shift right: faster than vclt
  6876. // (b < 0) ? 0xFFFFFFFF : 0
  6877. uint32x2_t ltMask = vreinterpret_u32_s32(vshr_n_s32(b, 31));
  6878. // (b == 0) ? 0xFFFFFFFF : 0
  6879. #if defined(__aarch64__)
  6880. int32x2_t zeroMask = vreinterpret_s32_u32(vceqz_s32(b));
  6881. #else
  6882. int32x2_t zeroMask = vreinterpret_s32_u32(vceq_s32(b, vdup_n_s32(0)));
  6883. #endif
6884. // bitwise select either 'a' or its negation (vneg_s32(a) produces the
6885. // negation of 'a') based on ltMask
  6886. int32x2_t masked = vbsl_s32(ltMask, vneg_s32(a), a);
  6887. // res = masked & (~zeroMask)
  6888. int32x2_t res = vbic_s32(masked, zeroMask);
  6889. return vreinterpret_m64_s32(res);
  6890. }
  6891. // Negate packed 8-bit integers in a when the corresponding signed 8-bit integer
6892. // in b is negative, and store the results in dst. Elements in dst are zeroed out
  6893. // when the corresponding element in b is zero.
  6894. //
  6895. // FOR j := 0 to 7
  6896. // i := j*8
  6897. // IF b[i+7:i] < 0
  6898. // dst[i+7:i] := -(a[i+7:i])
  6899. // ELSE IF b[i+7:i] == 0
  6900. // dst[i+7:i] := 0
  6901. // ELSE
  6902. // dst[i+7:i] := a[i+7:i]
  6903. // FI
  6904. // ENDFOR
  6905. //
  6906. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi8
  6907. FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b)
  6908. {
  6909. int8x8_t a = vreinterpret_s8_m64(_a);
  6910. int8x8_t b = vreinterpret_s8_m64(_b);
  6911. // signed shift right: faster than vclt
  6912. // (b < 0) ? 0xFF : 0
  6913. uint8x8_t ltMask = vreinterpret_u8_s8(vshr_n_s8(b, 7));
  6914. // (b == 0) ? 0xFF : 0
  6915. #if defined(__aarch64__)
  6916. int8x8_t zeroMask = vreinterpret_s8_u8(vceqz_s8(b));
  6917. #else
  6918. int8x8_t zeroMask = vreinterpret_s8_u8(vceq_s8(b, vdup_n_s8(0)));
  6919. #endif
6920. // bitwise select either 'a' or its negation (vneg_s8(a) produces the
6921. // negation of 'a') based on ltMask
  6922. int8x8_t masked = vbsl_s8(ltMask, vneg_s8(a), a);
  6923. // res = masked & (~zeroMask)
  6924. int8x8_t res = vbic_s8(masked, zeroMask);
  6925. return vreinterpret_m64_s8(res);
  6926. }
  6927. /* SSE4.1 */
  6928. // Blend packed 16-bit integers from a and b using control mask imm8, and store
  6929. // the results in dst.
  6930. //
  6931. // FOR j := 0 to 7
  6932. // i := j*16
  6933. // IF imm8[j]
  6934. // dst[i+15:i] := b[i+15:i]
  6935. // ELSE
  6936. // dst[i+15:i] := a[i+15:i]
  6937. // FI
  6938. // ENDFOR
  6939. // FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b,
  6940. // __constrange(0,255) int imm)
  6941. #define _mm_blend_epi16(a, b, imm) \
  6942. __extension__({ \
  6943. const uint16_t ones = 0xffff; \
  6944. const uint16_t zeros = 0x0000; \
  6945. const uint16_t _mask[8] = {((imm) & (1 << 0)) ? ones : zeros, \
  6946. ((imm) & (1 << 1)) ? ones : zeros, \
  6947. ((imm) & (1 << 2)) ? ones : zeros, \
  6948. ((imm) & (1 << 3)) ? ones : zeros, \
  6949. ((imm) & (1 << 4)) ? ones : zeros, \
  6950. ((imm) & (1 << 5)) ? ones : zeros, \
  6951. ((imm) & (1 << 6)) ? ones : zeros, \
  6952. ((imm) & (1 << 7)) ? ones : zeros}; \
  6953. uint16x8_t _mask_vec = vld1q_u16(_mask); \
  6954. uint16x8_t _a = vreinterpretq_u16_m128i(a); \
  6955. uint16x8_t _b = vreinterpretq_u16_m128i(b); \
  6956. vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, _b, _a)); \
  6957. })
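// Usage sketch (illustrative only): given two __m128i values a and b, an
// immediate of 0x0F takes the four low 16-bit lanes from b and the four high
// lanes from a, since bit j of the immediate selects b for lane j:
//
//   __m128i low_from_b = _mm_blend_epi16(a, b, 0x0F);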
  6958. // Blend packed double-precision (64-bit) floating-point elements from a and b
  6959. // using control mask imm8, and store the results in dst.
  6960. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_pd
  6961. #define _mm_blend_pd(a, b, imm) \
  6962. __extension__({ \
  6963. const uint64_t _mask[2] = { \
  6964. ((imm) & (1 << 0)) ? ~UINT64_C(0) : UINT64_C(0), \
  6965. ((imm) & (1 << 1)) ? ~UINT64_C(0) : UINT64_C(0)}; \
  6966. uint64x2_t _mask_vec = vld1q_u64(_mask); \
  6967. uint64x2_t _a = vreinterpretq_u64_m128d(a); \
  6968. uint64x2_t _b = vreinterpretq_u64_m128d(b); \
  6969. vreinterpretq_m128d_u64(vbslq_u64(_mask_vec, _b, _a)); \
  6970. })
  6971. // Blend packed single-precision (32-bit) floating-point elements from a and b
  6972. // using mask, and store the results in dst.
  6973. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_ps
  6974. FORCE_INLINE __m128 _mm_blend_ps(__m128 _a, __m128 _b, const char imm8)
  6975. {
  6976. const uint32_t ALIGN_STRUCT(16)
  6977. data[4] = {((imm8) & (1 << 0)) ? UINT32_MAX : 0,
  6978. ((imm8) & (1 << 1)) ? UINT32_MAX : 0,
  6979. ((imm8) & (1 << 2)) ? UINT32_MAX : 0,
  6980. ((imm8) & (1 << 3)) ? UINT32_MAX : 0};
  6981. uint32x4_t mask = vld1q_u32(data);
  6982. float32x4_t a = vreinterpretq_f32_m128(_a);
  6983. float32x4_t b = vreinterpretq_f32_m128(_b);
  6984. return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
  6985. }
  6986. // Blend packed 8-bit integers from a and b using mask, and store the results in
  6987. // dst.
  6988. //
  6989. // FOR j := 0 to 15
  6990. // i := j*8
  6991. // IF mask[i+7]
  6992. // dst[i+7:i] := b[i+7:i]
  6993. // ELSE
  6994. // dst[i+7:i] := a[i+7:i]
  6995. // FI
  6996. // ENDFOR
  6997. FORCE_INLINE __m128i _mm_blendv_epi8(__m128i _a, __m128i _b, __m128i _mask)
  6998. {
  6999. // Use a signed shift right to create a mask with the sign bit
  7000. uint8x16_t mask =
  7001. vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_m128i(_mask), 7));
  7002. uint8x16_t a = vreinterpretq_u8_m128i(_a);
  7003. uint8x16_t b = vreinterpretq_u8_m128i(_b);
  7004. return vreinterpretq_m128i_u8(vbslq_u8(mask, b, a));
  7005. }
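// Usage sketch (illustrative only; the helper name is hypothetical): per-byte
// select driven by a comparison mask. _mm_cmpgt_epi8 produces 0xFF/0x00 per
// byte, so the blend returns the signed per-byte maximum of a and b:
//
//   static inline __m128i max_epi8_example(__m128i a, __m128i b)
//   {
//       __m128i b_greater = _mm_cmpgt_epi8(b, a);   // 0xFF where b > a
//       return _mm_blendv_epi8(a, b, b_greater);    // pick b in those bytes
//   }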
  7006. // Blend packed double-precision (64-bit) floating-point elements from a and b
  7007. // using mask, and store the results in dst.
  7008. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_pd
  7009. FORCE_INLINE __m128d _mm_blendv_pd(__m128d _a, __m128d _b, __m128d _mask)
  7010. {
  7011. uint64x2_t mask =
  7012. vreinterpretq_u64_s64(vshrq_n_s64(vreinterpretq_s64_m128d(_mask), 63));
  7013. #if defined(__aarch64__)
  7014. float64x2_t a = vreinterpretq_f64_m128d(_a);
  7015. float64x2_t b = vreinterpretq_f64_m128d(_b);
  7016. return vreinterpretq_m128d_f64(vbslq_f64(mask, b, a));
  7017. #else
  7018. uint64x2_t a = vreinterpretq_u64_m128d(_a);
  7019. uint64x2_t b = vreinterpretq_u64_m128d(_b);
  7020. return vreinterpretq_m128d_u64(vbslq_u64(mask, b, a));
  7021. #endif
  7022. }
  7023. // Blend packed single-precision (32-bit) floating-point elements from a and b
  7024. // using mask, and store the results in dst.
  7025. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_ps
  7026. FORCE_INLINE __m128 _mm_blendv_ps(__m128 _a, __m128 _b, __m128 _mask)
  7027. {
  7028. // Use a signed shift right to create a mask with the sign bit
  7029. uint32x4_t mask =
  7030. vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_m128(_mask), 31));
  7031. float32x4_t a = vreinterpretq_f32_m128(_a);
  7032. float32x4_t b = vreinterpretq_f32_m128(_b);
  7033. return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
  7034. }
  7035. // Round the packed double-precision (64-bit) floating-point elements in a up
  7036. // to an integer value, and store the results as packed double-precision
  7037. // floating-point elements in dst.
  7038. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_pd
  7039. FORCE_INLINE __m128d _mm_ceil_pd(__m128d a)
  7040. {
  7041. #if defined(__aarch64__)
  7042. return vreinterpretq_m128d_f64(vrndpq_f64(vreinterpretq_f64_m128d(a)));
  7043. #else
  7044. double *f = (double *) &a;
  7045. return _mm_set_pd(ceil(f[1]), ceil(f[0]));
  7046. #endif
  7047. }
  7048. // Round the packed single-precision (32-bit) floating-point elements in a up to
  7049. // an integer value, and store the results as packed single-precision
  7050. // floating-point elements in dst.
  7051. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ps
  7052. FORCE_INLINE __m128 _mm_ceil_ps(__m128 a)
  7053. {
  7054. #if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING)
  7055. return vreinterpretq_m128_f32(vrndpq_f32(vreinterpretq_f32_m128(a)));
  7056. #else
  7057. float *f = (float *) &a;
  7058. return _mm_set_ps(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]), ceilf(f[0]));
  7059. #endif
  7060. }
  7061. // Round the lower double-precision (64-bit) floating-point element in b up to
  7062. // an integer value, store the result as a double-precision floating-point
  7063. // element in the lower element of dst, and copy the upper element from a to the
  7064. // upper element of dst.
  7065. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_sd
  7066. FORCE_INLINE __m128d _mm_ceil_sd(__m128d a, __m128d b)
  7067. {
  7068. return _mm_move_sd(a, _mm_ceil_pd(b));
  7069. }
  7070. // Round the lower single-precision (32-bit) floating-point element in b up to
  7071. // an integer value, store the result as a single-precision floating-point
  7072. // element in the lower element of dst, and copy the upper 3 packed elements
  7073. // from a to the upper elements of dst.
  7074. //
  7075. // dst[31:0] := CEIL(b[31:0])
  7076. // dst[127:32] := a[127:32]
  7077. //
  7078. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ss
  7079. FORCE_INLINE __m128 _mm_ceil_ss(__m128 a, __m128 b)
  7080. {
  7081. return _mm_move_ss(a, _mm_ceil_ps(b));
  7082. }
  7083. // Compare packed 64-bit integers in a and b for equality, and store the results
  7084. // in dst
  7085. FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b)
  7086. {
  7087. #if defined(__aarch64__)
  7088. return vreinterpretq_m128i_u64(
  7089. vceqq_u64(vreinterpretq_u64_m128i(a), vreinterpretq_u64_m128i(b)));
  7090. #else
  7091. // ARMv7 lacks vceqq_u64
  7092. // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
  7093. uint32x4_t cmp =
  7094. vceqq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b));
  7095. uint32x4_t swapped = vrev64q_u32(cmp);
  7096. return vreinterpretq_m128i_u32(vandq_u32(cmp, swapped));
  7097. #endif
  7098. }
  7099. // Converts the four signed 16-bit integers in the lower 64 bits to four signed
  7100. // 32-bit integers.
  7101. FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a)
  7102. {
  7103. return vreinterpretq_m128i_s32(
  7104. vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a))));
  7105. }
7106. // Converts the two signed 16-bit integers in the lower 32 bits to two signed
7107. // 64-bit integers.
  7108. FORCE_INLINE __m128i _mm_cvtepi16_epi64(__m128i a)
  7109. {
  7110. int16x8_t s16x8 = vreinterpretq_s16_m128i(a); /* xxxx xxxx xxxx 0B0A */
  7111. int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
  7112. int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
  7113. return vreinterpretq_m128i_s64(s64x2);
  7114. }
  7115. // Converts the two signed 32-bit integers in the lower 64 bits to two signed
  7116. // 64-bit integers.
  7117. FORCE_INLINE __m128i _mm_cvtepi32_epi64(__m128i a)
  7118. {
  7119. return vreinterpretq_m128i_s64(
  7120. vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a))));
  7121. }
7122. // Converts the eight signed 8-bit integers in the lower 64 bits to eight
7123. // signed 16-bit integers.
  7124. FORCE_INLINE __m128i _mm_cvtepi8_epi16(__m128i a)
  7125. {
  7126. int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
  7127. int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
  7128. return vreinterpretq_m128i_s16(s16x8);
  7129. }
7130. // Converts the four signed 8-bit integers in the lower 32 bits to four
7131. // signed 32-bit integers.
  7132. FORCE_INLINE __m128i _mm_cvtepi8_epi32(__m128i a)
  7133. {
  7134. int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
  7135. int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
  7136. int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */
  7137. return vreinterpretq_m128i_s32(s32x4);
  7138. }
7139. // Converts the two signed 8-bit integers in the lower 16 bits to two
7140. // signed 64-bit integers.
  7141. FORCE_INLINE __m128i _mm_cvtepi8_epi64(__m128i a)
  7142. {
  7143. int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx xxBA */
  7144. int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0x0x 0B0A */
  7145. int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
  7146. int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
  7147. return vreinterpretq_m128i_s64(s64x2);
  7148. }
  7149. // Converts the four unsigned 16-bit integers in the lower 64 bits to four
  7150. // unsigned 32-bit integers.
  7151. FORCE_INLINE __m128i _mm_cvtepu16_epi32(__m128i a)
  7152. {
  7153. return vreinterpretq_m128i_u32(
  7154. vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a))));
  7155. }
  7156. // Converts the two unsigned 16-bit integers in the lower 32 bits to two
  7157. // unsigned 64-bit integers.
  7158. FORCE_INLINE __m128i _mm_cvtepu16_epi64(__m128i a)
  7159. {
  7160. uint16x8_t u16x8 = vreinterpretq_u16_m128i(a); /* xxxx xxxx xxxx 0B0A */
  7161. uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
  7162. uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
  7163. return vreinterpretq_m128i_u64(u64x2);
  7164. }
  7165. // Converts the two unsigned 32-bit integers in the lower 64 bits to two
  7166. // unsigned 64-bit integers.
  7167. FORCE_INLINE __m128i _mm_cvtepu32_epi64(__m128i a)
  7168. {
  7169. return vreinterpretq_m128i_u64(
  7170. vmovl_u32(vget_low_u32(vreinterpretq_u32_m128i(a))));
  7171. }
  7172. // Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers,
  7173. // and store the results in dst.
  7174. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi16
  7175. FORCE_INLINE __m128i _mm_cvtepu8_epi16(__m128i a)
  7176. {
  7177. uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx HGFE DCBA */
  7178. uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0H0G 0F0E 0D0C 0B0A */
  7179. return vreinterpretq_m128i_u16(u16x8);
  7180. }
  7181. // Converts the four unsigned 8-bit integers in the lower 32 bits to four
  7182. // unsigned 32-bit integers.
  7183. // https://msdn.microsoft.com/en-us/library/bb531467%28v=vs.100%29.aspx
  7184. FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a)
  7185. {
  7186. uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */
  7187. uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */
  7188. uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */
  7189. return vreinterpretq_m128i_u32(u32x4);
  7190. }
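// Worked example (illustrative only): if the low four bytes of a are
// {0x80, 0x7F, 0x01, 0x00}, _mm_cvtepu8_epi32 zero-extends them to
// {128, 127, 1, 0}, whereas the signed variant _mm_cvtepi8_epi32 above
// sign-extends the same bytes to {-128, 127, 1, 0}.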
  7191. // Converts the two unsigned 8-bit integers in the lower 16 bits to two
  7192. // unsigned 64-bit integers.
  7193. FORCE_INLINE __m128i _mm_cvtepu8_epi64(__m128i a)
  7194. {
  7195. uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx xxBA */
  7196. uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0x0x 0B0A */
  7197. uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
  7198. uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
  7199. return vreinterpretq_m128i_u64(u64x2);
  7200. }
  7201. // Conditionally multiply the packed double-precision (64-bit) floating-point
7202. // elements in a and b using the high 4 bits in imm8, sum the two products, and
  7203. // conditionally store the sum in dst using the low 4 bits of imm8.
  7204. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_pd
  7205. FORCE_INLINE __m128d _mm_dp_pd(__m128d a, __m128d b, const int imm)
  7206. {
  7207. // Generate mask value from constant immediate bit value
  7208. const int64_t bit0Mask = imm & 0x01 ? UINT64_MAX : 0;
  7209. const int64_t bit1Mask = imm & 0x02 ? UINT64_MAX : 0;
  7210. #if !SSE2NEON_PRECISE_DP
  7211. const int64_t bit4Mask = imm & 0x10 ? UINT64_MAX : 0;
  7212. const int64_t bit5Mask = imm & 0x20 ? UINT64_MAX : 0;
  7213. #endif
  7214. // Conditional multiplication
  7215. #if !SSE2NEON_PRECISE_DP
  7216. __m128d mul = _mm_mul_pd(a, b);
  7217. const __m128d mulMask =
  7218. _mm_castsi128_pd(_mm_set_epi64x(bit5Mask, bit4Mask));
  7219. __m128d tmp = _mm_and_pd(mul, mulMask);
  7220. #else
  7221. #if defined(__aarch64__)
  7222. double d0 = (imm & 0x10) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0) *
  7223. vgetq_lane_f64(vreinterpretq_f64_m128d(b), 0)
  7224. : 0;
  7225. double d1 = (imm & 0x20) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1) *
  7226. vgetq_lane_f64(vreinterpretq_f64_m128d(b), 1)
  7227. : 0;
  7228. #else
  7229. double d0 = (imm & 0x10) ? ((double *) &a)[0] * ((double *) &b)[0] : 0;
  7230. double d1 = (imm & 0x20) ? ((double *) &a)[1] * ((double *) &b)[1] : 0;
  7231. #endif
  7232. __m128d tmp = _mm_set_pd(d1, d0);
  7233. #endif
  7234. // Sum the products
  7235. #if defined(__aarch64__)
  7236. double sum = vpaddd_f64(vreinterpretq_f64_m128d(tmp));
  7237. #else
  7238. double sum = *((double *) &tmp) + *(((double *) &tmp) + 1);
  7239. #endif
  7240. // Conditionally store the sum
  7241. const __m128d sumMask =
  7242. _mm_castsi128_pd(_mm_set_epi64x(bit1Mask, bit0Mask));
  7243. __m128d res = _mm_and_pd(_mm_set_pd1(sum), sumMask);
  7244. return res;
  7245. }
  7246. // Conditionally multiply the packed single-precision (32-bit) floating-point
  7247. // elements in a and b using the high 4 bits in imm8, sum the four products,
  7248. // and conditionally store the sum in dst using the low 4 bits of imm.
  7249. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_ps
  7250. FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm)
  7251. {
  7252. #if defined(__aarch64__)
  7253. /* shortcuts */
  7254. if (imm == 0xFF) {
  7255. return _mm_set1_ps(vaddvq_f32(_mm_mul_ps(a, b)));
  7256. }
  7257. if (imm == 0x7F) {
  7258. float32x4_t m = _mm_mul_ps(a, b);
  7259. m[3] = 0;
  7260. return _mm_set1_ps(vaddvq_f32(m));
  7261. }
  7262. #endif
  7263. float s = 0, c = 0;
  7264. float32x4_t f32a = vreinterpretq_f32_m128(a);
  7265. float32x4_t f32b = vreinterpretq_f32_m128(b);
  7266. /* To improve the accuracy of floating-point summation, Kahan algorithm
  7267. * is used for each operation.
  7268. */
  7269. if (imm & (1 << 4))
  7270. _sse2neon_kadd_f32(&s, &c, f32a[0] * f32b[0]);
  7271. if (imm & (1 << 5))
  7272. _sse2neon_kadd_f32(&s, &c, f32a[1] * f32b[1]);
  7273. if (imm & (1 << 6))
  7274. _sse2neon_kadd_f32(&s, &c, f32a[2] * f32b[2]);
  7275. if (imm & (1 << 7))
  7276. _sse2neon_kadd_f32(&s, &c, f32a[3] * f32b[3]);
  7277. s += c;
  7278. float32x4_t res = {
  7279. (imm & 0x1) ? s : 0,
  7280. (imm & 0x2) ? s : 0,
  7281. (imm & 0x4) ? s : 0,
  7282. (imm & 0x8) ? s : 0,
  7283. };
  7284. return vreinterpretq_m128_f32(res);
  7285. }
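// Worked example (illustrative only): with a = {1, 2, 3, 4}, b = {5, 6, 7, 8}
// and imm = 0xFF, all four products are summed (1*5 + 2*6 + 3*7 + 4*8 = 70)
// and 70.0f is broadcast to every lane; with imm = 0x71 only the low three
// products are summed (38) and stored in lane 0, the other lanes being zeroed.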
  7286. // Extracts the selected signed or unsigned 32-bit integer from a and zero
  7287. // extends.
  7288. // FORCE_INLINE int _mm_extract_epi32(__m128i a, __constrange(0,4) int imm)
  7289. #define _mm_extract_epi32(a, imm) \
  7290. vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))
  7291. // Extracts the selected signed or unsigned 64-bit integer from a and zero
  7292. // extends.
  7293. // FORCE_INLINE __int64 _mm_extract_epi64(__m128i a, __constrange(0,2) int imm)
  7294. #define _mm_extract_epi64(a, imm) \
  7295. vgetq_lane_s64(vreinterpretq_s64_m128i(a), (imm))
  7296. // Extracts the selected signed or unsigned 8-bit integer from a and zero
  7297. // extends.
  7298. // FORCE_INLINE int _mm_extract_epi8(__m128i a, __constrange(0,16) int imm)
  7299. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi8
  7300. #define _mm_extract_epi8(a, imm) vgetq_lane_u8(vreinterpretq_u8_m128i(a), (imm))
7301. // Extracts the selected single-precision (32-bit) floating-point element from a.
  7302. // FORCE_INLINE int _mm_extract_ps(__m128 a, __constrange(0,4) int imm)
  7303. #define _mm_extract_ps(a, imm) vgetq_lane_s32(vreinterpretq_s32_m128(a), (imm))
  7304. // Round the packed double-precision (64-bit) floating-point elements in a down
  7305. // to an integer value, and store the results as packed double-precision
  7306. // floating-point elements in dst.
  7307. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_pd
  7308. FORCE_INLINE __m128d _mm_floor_pd(__m128d a)
  7309. {
  7310. #if defined(__aarch64__)
  7311. return vreinterpretq_m128d_f64(vrndmq_f64(vreinterpretq_f64_m128d(a)));
  7312. #else
  7313. double *f = (double *) &a;
  7314. return _mm_set_pd(floor(f[1]), floor(f[0]));
  7315. #endif
  7316. }
  7317. // Round the packed single-precision (32-bit) floating-point elements in a down
  7318. // to an integer value, and store the results as packed single-precision
  7319. // floating-point elements in dst.
  7320. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ps
  7321. FORCE_INLINE __m128 _mm_floor_ps(__m128 a)
  7322. {
  7323. #if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING)
  7324. return vreinterpretq_m128_f32(vrndmq_f32(vreinterpretq_f32_m128(a)));
  7325. #else
  7326. float *f = (float *) &a;
  7327. return _mm_set_ps(floorf(f[3]), floorf(f[2]), floorf(f[1]), floorf(f[0]));
  7328. #endif
  7329. }
  7330. // Round the lower double-precision (64-bit) floating-point element in b down to
  7331. // an integer value, store the result as a double-precision floating-point
  7332. // element in the lower element of dst, and copy the upper element from a to the
  7333. // upper element of dst.
  7334. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_sd
  7335. FORCE_INLINE __m128d _mm_floor_sd(__m128d a, __m128d b)
  7336. {
  7337. return _mm_move_sd(a, _mm_floor_pd(b));
  7338. }
  7339. // Round the lower single-precision (32-bit) floating-point element in b down to
  7340. // an integer value, store the result as a single-precision floating-point
  7341. // element in the lower element of dst, and copy the upper 3 packed elements
  7342. // from a to the upper elements of dst.
  7343. //
  7344. // dst[31:0] := FLOOR(b[31:0])
  7345. // dst[127:32] := a[127:32]
  7346. //
  7347. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ss
  7348. FORCE_INLINE __m128 _mm_floor_ss(__m128 a, __m128 b)
  7349. {
  7350. return _mm_move_ss(a, _mm_floor_ps(b));
  7351. }
  7352. // Inserts the least significant 32 bits of b into the selected 32-bit integer
  7353. // of a.
  7354. // FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b,
  7355. // __constrange(0,4) int imm)
  7356. #define _mm_insert_epi32(a, b, imm) \
  7357. __extension__({ \
  7358. vreinterpretq_m128i_s32( \
  7359. vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm))); \
  7360. })
  7361. // Inserts the least significant 64 bits of b into the selected 64-bit integer
  7362. // of a.
  7363. // FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b,
  7364. // __constrange(0,2) int imm)
  7365. #define _mm_insert_epi64(a, b, imm) \
  7366. __extension__({ \
  7367. vreinterpretq_m128i_s64( \
  7368. vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm))); \
  7369. })
  7370. // Inserts the least significant 8 bits of b into the selected 8-bit integer
  7371. // of a.
  7372. // FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b,
  7373. // __constrange(0,16) int imm)
  7374. #define _mm_insert_epi8(a, b, imm) \
  7375. __extension__({ \
  7376. vreinterpretq_m128i_s8( \
  7377. vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm))); \
  7378. })
  7379. // Copy a to tmp, then insert a single-precision (32-bit) floating-point
  7380. // element from b into tmp using the control in imm8. Store tmp to dst using
  7381. // the mask in imm8 (elements are zeroed out when the corresponding bit is set).
  7382. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=insert_ps
  7383. #define _mm_insert_ps(a, b, imm8) \
  7384. __extension__({ \
  7385. float32x4_t tmp1 = \
  7386. vsetq_lane_f32(vgetq_lane_f32(b, (imm8 >> 6) & 0x3), \
  7387. vreinterpretq_f32_m128(a), 0); \
  7388. float32x4_t tmp2 = \
  7389. vsetq_lane_f32(vgetq_lane_f32(tmp1, 0), vreinterpretq_f32_m128(a), \
  7390. ((imm8 >> 4) & 0x3)); \
  7391. const uint32_t data[4] = {((imm8) & (1 << 0)) ? UINT32_MAX : 0, \
  7392. ((imm8) & (1 << 1)) ? UINT32_MAX : 0, \
  7393. ((imm8) & (1 << 2)) ? UINT32_MAX : 0, \
  7394. ((imm8) & (1 << 3)) ? UINT32_MAX : 0}; \
  7395. uint32x4_t mask = vld1q_u32(data); \
  7396. float32x4_t all_zeros = vdupq_n_f32(0); \
  7397. \
  7398. vreinterpretq_m128_f32( \
  7399. vbslq_f32(mask, all_zeros, vreinterpretq_f32_m128(tmp2))); \
  7400. })
  7401. // epi versions of min/max
7402. // Computes the pairwise maximums of the four signed 32-bit integer values of a
  7403. // and b.
  7404. //
  7405. // A 128-bit parameter that can be defined with the following equations:
  7406. // r0 := (a0 > b0) ? a0 : b0
  7407. // r1 := (a1 > b1) ? a1 : b1
  7408. // r2 := (a2 > b2) ? a2 : b2
  7409. // r3 := (a3 > b3) ? a3 : b3
  7410. //
  7411. // https://msdn.microsoft.com/en-us/library/vstudio/bb514055(v=vs.100).aspx
  7412. FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b)
  7413. {
  7414. return vreinterpretq_m128i_s32(
  7415. vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  7416. }
  7417. // Compare packed signed 8-bit integers in a and b, and store packed maximum
  7418. // values in dst.
  7419. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi8
  7420. FORCE_INLINE __m128i _mm_max_epi8(__m128i a, __m128i b)
  7421. {
  7422. return vreinterpretq_m128i_s8(
  7423. vmaxq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  7424. }
  7425. // Compare packed unsigned 16-bit integers in a and b, and store packed maximum
  7426. // values in dst.
  7427. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu16
  7428. FORCE_INLINE __m128i _mm_max_epu16(__m128i a, __m128i b)
  7429. {
  7430. return vreinterpretq_m128i_u16(
  7431. vmaxq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
  7432. }
  7433. // Compare packed unsigned 32-bit integers in a and b, and store packed maximum
  7434. // values in dst.
  7435. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu32
  7436. FORCE_INLINE __m128i _mm_max_epu32(__m128i a, __m128i b)
  7437. {
  7438. return vreinterpretq_m128i_u32(
  7439. vmaxq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
  7440. }
7441. // Computes the pairwise minima of the four signed 32-bit integer values of a
  7442. // and b.
  7443. //
  7444. // A 128-bit parameter that can be defined with the following equations:
  7445. // r0 := (a0 < b0) ? a0 : b0
  7446. // r1 := (a1 < b1) ? a1 : b1
  7447. // r2 := (a2 < b2) ? a2 : b2
  7448. // r3 := (a3 < b3) ? a3 : b3
  7449. //
  7450. // https://msdn.microsoft.com/en-us/library/vstudio/bb531476(v=vs.100).aspx
  7451. FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b)
  7452. {
  7453. return vreinterpretq_m128i_s32(
  7454. vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  7455. }
  7456. // Compare packed signed 8-bit integers in a and b, and store packed minimum
  7457. // values in dst.
  7458. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi8
  7459. FORCE_INLINE __m128i _mm_min_epi8(__m128i a, __m128i b)
  7460. {
  7461. return vreinterpretq_m128i_s8(
  7462. vminq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  7463. }
  7464. // Compare packed unsigned 16-bit integers in a and b, and store packed minimum
  7465. // values in dst.
  7466. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu16
  7467. FORCE_INLINE __m128i _mm_min_epu16(__m128i a, __m128i b)
  7468. {
  7469. return vreinterpretq_m128i_u16(
  7470. vminq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
  7471. }
  7472. // Compare packed unsigned 32-bit integers in a and b, and store packed minimum
  7473. // values in dst.
7474. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu32
  7475. FORCE_INLINE __m128i _mm_min_epu32(__m128i a, __m128i b)
  7476. {
  7477. return vreinterpretq_m128i_u32(
  7478. vminq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
  7479. }
  7480. // Horizontally compute the minimum amongst the packed unsigned 16-bit integers
  7481. // in a, store the minimum and index in dst, and zero the remaining bits in dst.
  7482. //
  7483. // index[2:0] := 0
  7484. // min[15:0] := a[15:0]
  7485. // FOR j := 0 to 7
  7486. // i := j*16
  7487. // IF a[i+15:i] < min[15:0]
  7488. // index[2:0] := j
  7489. // min[15:0] := a[i+15:i]
  7490. // FI
  7491. // ENDFOR
  7492. // dst[15:0] := min[15:0]
  7493. // dst[18:16] := index[2:0]
  7494. // dst[127:19] := 0
  7495. //
  7496. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_minpos_epu16
  7497. FORCE_INLINE __m128i _mm_minpos_epu16(__m128i a)
  7498. {
  7499. __m128i dst;
  7500. uint16_t min, idx = 0;
  7501. #if defined(__aarch64__)
  7502. // Find the minimum value
  7503. min = vminvq_u16(vreinterpretq_u16_m128i(a));
  7504. // Get the index of the minimum value
  7505. static const uint16_t idxv[] = {0, 1, 2, 3, 4, 5, 6, 7};
  7506. uint16x8_t minv = vdupq_n_u16(min);
  7507. uint16x8_t cmeq = vceqq_u16(minv, vreinterpretq_u16_m128i(a));
  7508. idx = vminvq_u16(vornq_u16(vld1q_u16(idxv), cmeq));
  7509. #else
  7510. // Find the minimum value
  7511. __m64 tmp;
  7512. tmp = vreinterpret_m64_u16(
  7513. vmin_u16(vget_low_u16(vreinterpretq_u16_m128i(a)),
  7514. vget_high_u16(vreinterpretq_u16_m128i(a))));
  7515. tmp = vreinterpret_m64_u16(
  7516. vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
  7517. tmp = vreinterpret_m64_u16(
  7518. vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
  7519. min = vget_lane_u16(vreinterpret_u16_m64(tmp), 0);
  7520. // Get the index of the minimum value
  7521. int i;
  7522. for (i = 0; i < 8; i++) {
  7523. if (min == vgetq_lane_u16(vreinterpretq_u16_m128i(a), 0)) {
  7524. idx = (uint16_t) i;
  7525. break;
  7526. }
  7527. a = _mm_srli_si128(a, 2);
  7528. }
  7529. #endif
  7530. // Generate result
  7531. dst = _mm_setzero_si128();
  7532. dst = vreinterpretq_m128i_u16(
  7533. vsetq_lane_u16(min, vreinterpretq_u16_m128i(dst), 0));
  7534. dst = vreinterpretq_m128i_u16(
  7535. vsetq_lane_u16(idx, vreinterpretq_u16_m128i(dst), 1));
  7536. return dst;
  7537. }
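// Worked example (illustrative only): for a = {7, 3, 9, 3, 100, 200, 300, 400}
// the minimum is 3 and its first occurrence is lane 1, so dst holds 3 in bits
// [15:0], 1 in bits [18:16], and zeros elsewhere.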
  7538. // Compute the sum of absolute differences (SADs) of quadruplets of unsigned
  7539. // 8-bit integers in a compared to those in b, and store the 16-bit results in
  7540. // dst. Eight SADs are performed using one quadruplet from b and eight
7541. // quadruplets from a. One quadruplet is selected from b starting at the
  7542. // offset specified in imm8. Eight quadruplets are formed from sequential 8-bit
  7543. // integers selected from a starting at the offset specified in imm8.
  7544. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mpsadbw_epu8
  7545. FORCE_INLINE __m128i _mm_mpsadbw_epu8(__m128i a, __m128i b, const int imm)
  7546. {
  7547. uint8x16_t _a, _b;
  7548. switch (imm & 0x4) {
  7549. case 0:
  7550. // do nothing
  7551. _a = vreinterpretq_u8_m128i(a);
  7552. break;
  7553. case 4:
  7554. _a = vreinterpretq_u8_u32(vextq_u32(vreinterpretq_u32_m128i(a),
  7555. vreinterpretq_u32_m128i(a), 1));
  7556. break;
  7557. default:
  7558. #if defined(__GNUC__) || defined(__clang__)
  7559. __builtin_unreachable();
  7560. #endif
  7561. break;
  7562. }
  7563. switch (imm & 0x3) {
  7564. case 0:
  7565. _b = vreinterpretq_u8_u32(
  7566. vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 0)));
  7567. break;
  7568. case 1:
  7569. _b = vreinterpretq_u8_u32(
  7570. vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 1)));
  7571. break;
  7572. case 2:
  7573. _b = vreinterpretq_u8_u32(
  7574. vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 2)));
  7575. break;
  7576. case 3:
  7577. _b = vreinterpretq_u8_u32(
  7578. vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 3)));
  7579. break;
  7580. default:
  7581. #if defined(__GNUC__) || defined(__clang__)
  7582. __builtin_unreachable();
  7583. #endif
  7584. break;
  7585. }
  7586. int16x8_t c04, c15, c26, c37;
  7587. uint8x8_t low_b = vget_low_u8(_b);
  7588. c04 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a), low_b));
  7589. uint8x16_t _a_1 = vextq_u8(_a, _a, 1);
  7590. c15 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_1), low_b));
  7591. uint8x16_t _a_2 = vextq_u8(_a, _a, 2);
  7592. c26 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_2), low_b));
  7593. uint8x16_t _a_3 = vextq_u8(_a, _a, 3);
  7594. c37 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_3), low_b));
  7595. #if defined(__aarch64__)
  7596. // |0|4|2|6|
  7597. c04 = vpaddq_s16(c04, c26);
  7598. // |1|5|3|7|
  7599. c15 = vpaddq_s16(c15, c37);
  7600. int32x4_t trn1_c =
  7601. vtrn1q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15));
  7602. int32x4_t trn2_c =
  7603. vtrn2q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15));
  7604. return vreinterpretq_m128i_s16(vpaddq_s16(vreinterpretq_s16_s32(trn1_c),
  7605. vreinterpretq_s16_s32(trn2_c)));
  7606. #else
  7607. int16x4_t c01, c23, c45, c67;
  7608. c01 = vpadd_s16(vget_low_s16(c04), vget_low_s16(c15));
  7609. c23 = vpadd_s16(vget_low_s16(c26), vget_low_s16(c37));
  7610. c45 = vpadd_s16(vget_high_s16(c04), vget_high_s16(c15));
  7611. c67 = vpadd_s16(vget_high_s16(c26), vget_high_s16(c37));
  7612. return vreinterpretq_m128i_s16(
  7613. vcombine_s16(vpadd_s16(c01, c23), vpadd_s16(c45, c67)));
  7614. #endif
  7615. }
  7616. // Multiply the low signed 32-bit integers from each packed 64-bit element in
  7617. // a and b, and store the signed 64-bit results in dst.
  7618. //
  7619. // r0 := (int64_t)(int32_t)a0 * (int64_t)(int32_t)b0
  7620. // r1 := (int64_t)(int32_t)a2 * (int64_t)(int32_t)b2
  7621. FORCE_INLINE __m128i _mm_mul_epi32(__m128i a, __m128i b)
  7622. {
  7623. // vmull_s32 upcasts instead of masking, so we downcast.
  7624. int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a));
  7625. int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b));
  7626. return vreinterpretq_m128i_s64(vmull_s32(a_lo, b_lo));
  7627. }
  7628. // Multiplies the 4 signed or unsigned 32-bit integers from a by the 4 signed or
  7629. // unsigned 32-bit integers from b.
  7630. // https://msdn.microsoft.com/en-us/library/vstudio/bb531409(v=vs.100).aspx
  7631. FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b)
  7632. {
  7633. return vreinterpretq_m128i_s32(
  7634. vmulq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
  7635. }
  7636. // Packs the 8 unsigned 32-bit integers from a and b into unsigned 16-bit
  7637. // integers and saturates.
  7638. //
  7639. // r0 := UnsignedSaturate(a0)
  7640. // r1 := UnsignedSaturate(a1)
  7641. // r2 := UnsignedSaturate(a2)
  7642. // r3 := UnsignedSaturate(a3)
  7643. // r4 := UnsignedSaturate(b0)
  7644. // r5 := UnsignedSaturate(b1)
  7645. // r6 := UnsignedSaturate(b2)
  7646. // r7 := UnsignedSaturate(b3)
  7647. FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b)
  7648. {
  7649. return vreinterpretq_m128i_u16(
  7650. vcombine_u16(vqmovun_s32(vreinterpretq_s32_m128i(a)),
  7651. vqmovun_s32(vreinterpretq_s32_m128i(b))));
  7652. }
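// Worked example (illustrative only): packing a = {-5, 0, 40000, 70000} with
// b = {1, 2, 3, 4} gives {0, 0, 40000, 65535, 1, 2, 3, 4}: negative inputs
// clamp to 0 and inputs above 65535 clamp to 65535.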
  7653. // Round the packed double-precision (64-bit) floating-point elements in a using
  7654. // the rounding parameter, and store the results as packed double-precision
  7655. // floating-point elements in dst.
  7656. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_pd
  7657. FORCE_INLINE __m128d _mm_round_pd(__m128d a, int rounding)
  7658. {
  7659. #if defined(__aarch64__)
  7660. switch (rounding) {
  7661. case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
  7662. return vreinterpretq_m128d_f64(vrndnq_f64(vreinterpretq_f64_m128d(a)));
  7663. case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
  7664. return _mm_floor_pd(a);
  7665. case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
  7666. return _mm_ceil_pd(a);
  7667. case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
  7668. return vreinterpretq_m128d_f64(vrndq_f64(vreinterpretq_f64_m128d(a)));
  7669. default: //_MM_FROUND_CUR_DIRECTION
  7670. return vreinterpretq_m128d_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)));
  7671. }
  7672. #else
  7673. double *v_double = (double *) &a;
  7674. if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) ||
  7675. (rounding == _MM_FROUND_CUR_DIRECTION &&
  7676. _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) {
  7677. double res[2], tmp;
  7678. for (int i = 0; i < 2; i++) {
  7679. tmp = (v_double[i] < 0) ? -v_double[i] : v_double[i];
  7680. double roundDown = floor(tmp); // Round down value
  7681. double roundUp = ceil(tmp); // Round up value
  7682. double diffDown = tmp - roundDown;
  7683. double diffUp = roundUp - tmp;
  7684. if (diffDown < diffUp) {
  7685. /* If it's closer to the round down value, then use it */
  7686. res[i] = roundDown;
  7687. } else if (diffDown > diffUp) {
  7688. /* If it's closer to the round up value, then use it */
  7689. res[i] = roundUp;
  7690. } else {
  7691. /* If it's equidistant between round up and round down value,
  7692. * pick the one which is an even number */
  7693. double half = roundDown / 2;
  7694. if (half != floor(half)) {
  7695. /* If the round down value is odd, return the round up value
  7696. */
  7697. res[i] = roundUp;
  7698. } else {
  7699. /* If the round up value is odd, return the round down value
  7700. */
  7701. res[i] = roundDown;
  7702. }
  7703. }
  7704. res[i] = (v_double[i] < 0) ? -res[i] : res[i];
  7705. }
  7706. return _mm_set_pd(res[1], res[0]);
  7707. } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) ||
  7708. (rounding == _MM_FROUND_CUR_DIRECTION &&
  7709. _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) {
  7710. return _mm_floor_pd(a);
  7711. } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) ||
  7712. (rounding == _MM_FROUND_CUR_DIRECTION &&
  7713. _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) {
  7714. return _mm_ceil_pd(a);
  7715. }
  7716. return _mm_set_pd(v_double[1] > 0 ? floor(v_double[1]) : ceil(v_double[1]),
  7717. v_double[0] > 0 ? floor(v_double[0]) : ceil(v_double[0]));
  7718. #endif
  7719. }
  7720. // Round the packed single-precision (32-bit) floating-point elements in a using
  7721. // the rounding parameter, and store the results as packed single-precision
  7722. // floating-point elements in dst.
  7723. // software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ps
  7724. FORCE_INLINE __m128 _mm_round_ps(__m128 a, int rounding)
  7725. {
  7726. #if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING)
  7727. switch (rounding) {
  7728. case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
  7729. return vreinterpretq_m128_f32(vrndnq_f32(vreinterpretq_f32_m128(a)));
  7730. case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
  7731. return _mm_floor_ps(a);
  7732. case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
  7733. return _mm_ceil_ps(a);
  7734. case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
  7735. return vreinterpretq_m128_f32(vrndq_f32(vreinterpretq_f32_m128(a)));
  7736. default: //_MM_FROUND_CUR_DIRECTION
  7737. return vreinterpretq_m128_f32(vrndiq_f32(vreinterpretq_f32_m128(a)));
  7738. }
  7739. #else
  7740. float *v_float = (float *) &a;
  7741. if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) ||
  7742. (rounding == _MM_FROUND_CUR_DIRECTION &&
  7743. _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) {
  7744. uint32x4_t signmask = vdupq_n_u32(0x80000000);
  7745. float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
  7746. vdupq_n_f32(0.5f)); /* +/- 0.5 */
  7747. int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
  7748. vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
  7749. int32x4_t r_trunc = vcvtq_s32_f32(
  7750. vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
  7751. int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
  7752. vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
  7753. int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
  7754. vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
  7755. float32x4_t delta = vsubq_f32(
  7756. vreinterpretq_f32_m128(a),
  7757. vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
  7758. uint32x4_t is_delta_half =
  7759. vceqq_f32(delta, half); /* delta == +/- 0.5 */
  7760. return vreinterpretq_m128_f32(
  7761. vcvtq_f32_s32(vbslq_s32(is_delta_half, r_even, r_normal)));
  7762. } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) ||
  7763. (rounding == _MM_FROUND_CUR_DIRECTION &&
  7764. _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) {
  7765. return _mm_floor_ps(a);
  7766. } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) ||
  7767. (rounding == _MM_FROUND_CUR_DIRECTION &&
  7768. _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) {
  7769. return _mm_ceil_ps(a);
  7770. }
  7771. return _mm_set_ps(v_float[3] > 0 ? floorf(v_float[3]) : ceilf(v_float[3]),
  7772. v_float[2] > 0 ? floorf(v_float[2]) : ceilf(v_float[2]),
  7773. v_float[1] > 0 ? floorf(v_float[1]) : ceilf(v_float[1]),
  7774. v_float[0] > 0 ? floorf(v_float[0]) : ceilf(v_float[0]));
  7775. #endif
  7776. }
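// Worked example (illustrative only): rounding {1.5f, 2.5f, -1.5f, 1.4f} with
// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) yields {2.0f, 2.0f, -2.0f,
// 1.0f} -- ties round to the nearest even value, matching both the vrndnq_f32
// path and the scalar fallback above.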
  7777. // Round the lower double-precision (64-bit) floating-point element in b using
  7778. // the rounding parameter, store the result as a double-precision floating-point
  7779. // element in the lower element of dst, and copy the upper element from a to the
  7780. // upper element of dst.
  7781. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_sd
  7782. FORCE_INLINE __m128d _mm_round_sd(__m128d a, __m128d b, int rounding)
  7783. {
  7784. return _mm_move_sd(a, _mm_round_pd(b, rounding));
  7785. }
  7786. // Round the lower single-precision (32-bit) floating-point element in b using
  7787. // the rounding parameter, store the result as a single-precision floating-point
  7788. // element in the lower element of dst, and copy the upper 3 packed elements
  7789. // from a to the upper elements of dst. Rounding is done according to the
  7790. // rounding[3:0] parameter, which can be one of:
  7791. // (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and
  7792. // suppress exceptions
  7793. // (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and
  7794. // suppress exceptions
  7795. // (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress
  7796. // exceptions
7797. // (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress
7798. // exceptions
7799. // _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
  7800. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_ss
  7801. FORCE_INLINE __m128 _mm_round_ss(__m128 a, __m128 b, int rounding)
  7802. {
  7803. return _mm_move_ss(a, _mm_round_ps(b, rounding));
  7804. }
  7805. // Load 128-bits of integer data from memory into dst using a non-temporal
  7806. // memory hint. mem_addr must be aligned on a 16-byte boundary or a
  7807. // general-protection exception may be generated.
  7808. //
  7809. // dst[127:0] := MEM[mem_addr+127:mem_addr]
  7810. //
  7811. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_load_si128
  7812. FORCE_INLINE __m128i _mm_stream_load_si128(__m128i *p)
  7813. {
  7814. #if __has_builtin(__builtin_nontemporal_store)
  7815. return __builtin_nontemporal_load(p);
  7816. #else
  7817. return vreinterpretq_m128i_s64(vld1q_s64((int64_t *) p));
  7818. #endif
  7819. }
  7820. // Compute the bitwise NOT of a and then AND with a 128-bit vector containing
  7821. // all 1's, and return 1 if the result is zero, otherwise return 0.
  7822. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_ones
  7823. FORCE_INLINE int _mm_test_all_ones(__m128i a)
  7824. {
  7825. return (uint64_t) (vgetq_lane_s64(a, 0) & vgetq_lane_s64(a, 1)) ==
  7826. ~(uint64_t) 0;
  7827. }
  7828. // Compute the bitwise AND of 128 bits (representing integer data) in a and
  7829. // mask, and return 1 if the result is zero, otherwise return 0.
  7830. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_zeros
  7831. FORCE_INLINE int _mm_test_all_zeros(__m128i a, __m128i mask)
  7832. {
  7833. int64x2_t a_and_mask =
  7834. vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(mask));
  7835. return !(vgetq_lane_s64(a_and_mask, 0) | vgetq_lane_s64(a_and_mask, 1));
  7836. }
  7837. // Compute the bitwise AND of 128 bits (representing integer data) in a and
  7838. // mask, and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute
  7839. // the bitwise NOT of a and then AND with mask, and set CF to 1 if the result is
  7840. // zero, otherwise set CF to 0. Return 1 if both the ZF and CF values are zero,
  7841. // otherwise return 0.
  7842. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_test_mix_ones_zero
  7843. FORCE_INLINE int _mm_test_mix_ones_zeros(__m128i a, __m128i mask)
  7844. {
  7845. uint64x2_t zf =
  7846. vandq_u64(vreinterpretq_u64_m128i(mask), vreinterpretq_u64_m128i(a));
  7847. uint64x2_t cf =
  7848. vbicq_u64(vreinterpretq_u64_m128i(mask), vreinterpretq_u64_m128i(a));
  7849. uint64x2_t result = vandq_u64(zf, cf);
  7850. return !(vgetq_lane_u64(result, 0) | vgetq_lane_u64(result, 1));
  7851. }
  7852. // Compute the bitwise AND of 128 bits (representing integer data) in a and b,
  7853. // and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
  7854. // bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
  7855. // otherwise set CF to 0. Return the CF value.
  7856. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testc_si128
  7857. FORCE_INLINE int _mm_testc_si128(__m128i a, __m128i b)
  7858. {
  7859. int64x2_t s64 =
  7860. vbicq_s64(vreinterpretq_s64_m128i(b), vreinterpretq_s64_m128i(a));
  7861. return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
  7862. }
  7863. // Compute the bitwise AND of 128 bits (representing integer data) in a and b,
  7864. // and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
  7865. // bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
  7866. // otherwise set CF to 0. Return 1 if both the ZF and CF values are zero,
  7867. // otherwise return 0.
  7868. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testnzc_si128
  7869. #define _mm_testnzc_si128(a, b) _mm_test_mix_ones_zeros(a, b)
  7870. // Compute the bitwise AND of 128 bits (representing integer data) in a and b,
  7871. // and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
  7872. // bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
  7873. // otherwise set CF to 0. Return the ZF value.
  7874. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testz_si128
  7875. FORCE_INLINE int _mm_testz_si128(__m128i a, __m128i b)
  7876. {
  7877. int64x2_t s64 =
  7878. vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b));
  7879. return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
  7880. }
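// A short sketch (hypothetical values) of how the predicates above relate to
// the ZF/CF flags of the x86 PTEST instruction:
//
//   __m128i data = _mm_set1_epi8(0x10);
//   __m128i mask = _mm_set1_epi8(0x0f);
//   int zf = _mm_testz_si128(data, mask);   // 1: (data & mask) == 0
//   int cf = _mm_testc_si128(data, mask);   // 0: (~data & mask) != 0
//   int nz = _mm_testnzc_si128(data, mask); // 0: needs ZF == 0 and CF == 0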
  7881. /* SSE4.2 */
  7882. const static uint16_t _sse2neon_cmpestr_mask16b[8] ALIGN_STRUCT(16) = {
  7883. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7884. };
  7885. const static uint8_t _sse2neon_cmpestr_mask8b[16] ALIGN_STRUCT(16) = {
  7886. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7887. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7888. };
  7889. /* specify the source data format */
  7890. #define _SIDD_UBYTE_OPS 0x00 /* unsigned 8-bit characters */
  7891. #define _SIDD_UWORD_OPS 0x01 /* unsigned 16-bit characters */
  7892. #define _SIDD_SBYTE_OPS 0x02 /* signed 8-bit characters */
  7893. #define _SIDD_SWORD_OPS 0x03 /* signed 16-bit characters */
  7894. /* specify the comparison operation */
  7895. #define _SIDD_CMP_EQUAL_ANY 0x00 /* compare equal any: strchr */
  7896. #define _SIDD_CMP_RANGES 0x04 /* compare ranges */
  7897. #define _SIDD_CMP_EQUAL_EACH 0x08 /* compare equal each: strcmp */
  7898. #define _SIDD_CMP_EQUAL_ORDERED 0x0C /* compare equal ordered */
  7899. /* specify the polarity */
  7900. #define _SIDD_POSITIVE_POLARITY 0x00
  7901. #define _SIDD_MASKED_POSITIVE_POLARITY 0x20
  7902. #define _SIDD_NEGATIVE_POLARITY 0x10 /* negate results */
  7903. #define _SIDD_MASKED_NEGATIVE_POLARITY \
  7904. 0x30 /* negate results only before end of string */
  7905. /* specify the output selection in _mm_cmpXstri */
  7906. #define _SIDD_LEAST_SIGNIFICANT 0x00
  7907. #define _SIDD_MOST_SIGNIFICANT 0x40
  7908. /* specify the output selection in _mm_cmpXstrm */
  7909. #define _SIDD_BIT_MASK 0x00
  7910. #define _SIDD_UNIT_MASK 0x40
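// These flags are OR'ed together into the imm8 control taken by the
// _mm_cmp{e,i}str* intrinsics below. An illustrative (hypothetical)
// combination for a strcmp-style comparison on unsigned bytes:
//
//   const int ctrl = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH |
//                    _SIDD_NEGATIVE_POLARITY | _SIDD_LEAST_SIGNIFICANT;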
  7911. /* Pattern Matching for C macros.
  7912. * https://github.com/pfultz2/Cloak/wiki/C-Preprocessor-tricks,-tips,-and-idioms
  7913. */
  7914. /* catenate */
  7915. #define SSE2NEON_PRIMITIVE_CAT(a, ...) a##__VA_ARGS__
  7916. #define SSE2NEON_CAT(a, b) SSE2NEON_PRIMITIVE_CAT(a, b)
  7917. #define SSE2NEON_IIF(c) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_IIF_, c)
  7918. /* run the 2nd parameter */
  7919. #define SSE2NEON_IIF_0(t, ...) __VA_ARGS__
  7920. /* run the 1st parameter */
  7921. #define SSE2NEON_IIF_1(t, ...) t
  7922. #define SSE2NEON_COMPL(b) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_COMPL_, b)
  7923. #define SSE2NEON_COMPL_0 1
  7924. #define SSE2NEON_COMPL_1 0
  7925. #define SSE2NEON_DEC(x) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_DEC_, x)
  7926. #define SSE2NEON_DEC_1 0
  7927. #define SSE2NEON_DEC_2 1
  7928. #define SSE2NEON_DEC_3 2
  7929. #define SSE2NEON_DEC_4 3
  7930. #define SSE2NEON_DEC_5 4
  7931. #define SSE2NEON_DEC_6 5
  7932. #define SSE2NEON_DEC_7 6
  7933. #define SSE2NEON_DEC_8 7
  7934. #define SSE2NEON_DEC_9 8
  7935. #define SSE2NEON_DEC_10 9
  7936. #define SSE2NEON_DEC_11 10
  7937. #define SSE2NEON_DEC_12 11
  7938. #define SSE2NEON_DEC_13 12
  7939. #define SSE2NEON_DEC_14 13
  7940. #define SSE2NEON_DEC_15 14
  7941. #define SSE2NEON_DEC_16 15
  7942. /* detection */
  7943. #define SSE2NEON_CHECK_N(x, n, ...) n
  7944. #define SSE2NEON_CHECK(...) SSE2NEON_CHECK_N(__VA_ARGS__, 0, )
  7945. #define SSE2NEON_PROBE(x) x, 1,
  7946. #define SSE2NEON_NOT(x) SSE2NEON_CHECK(SSE2NEON_PRIMITIVE_CAT(SSE2NEON_NOT_, x))
  7947. #define SSE2NEON_NOT_0 SSE2NEON_PROBE(~)
  7948. #define SSE2NEON_BOOL(x) SSE2NEON_COMPL(SSE2NEON_NOT(x))
  7949. #define SSE2NEON_IF(c) SSE2NEON_IIF(SSE2NEON_BOOL(c))
  7950. #define SSE2NEON_EAT(...)
  7951. #define SSE2NEON_EXPAND(...) __VA_ARGS__
  7952. #define SSE2NEON_WHEN(c) SSE2NEON_IF(c)(SSE2NEON_EXPAND, SSE2NEON_EAT)
  7953. /* recursion */
  7954. /* deferred expression */
  7955. #define SSE2NEON_EMPTY()
  7956. #define SSE2NEON_DEFER(id) id SSE2NEON_EMPTY()
  7957. #define SSE2NEON_OBSTRUCT(...) __VA_ARGS__ SSE2NEON_DEFER(SSE2NEON_EMPTY)()
  7958. #define SSE2NEON_EXPAND(...) __VA_ARGS__
  7959. #define SSE2NEON_EVAL(...) \
  7960. SSE2NEON_EVAL1(SSE2NEON_EVAL1(SSE2NEON_EVAL1(__VA_ARGS__)))
  7961. #define SSE2NEON_EVAL1(...) \
  7962. SSE2NEON_EVAL2(SSE2NEON_EVAL2(SSE2NEON_EVAL2(__VA_ARGS__)))
  7963. #define SSE2NEON_EVAL2(...) \
  7964. SSE2NEON_EVAL3(SSE2NEON_EVAL3(SSE2NEON_EVAL3(__VA_ARGS__)))
  7965. #define SSE2NEON_EVAL3(...) __VA_ARGS__
  7966. #define SSE2NEON_REPEAT(count, macro, ...) \
  7967. SSE2NEON_WHEN(count) \
  7968. (SSE2NEON_OBSTRUCT(SSE2NEON_REPEAT_INDIRECT)()( \
  7969. SSE2NEON_DEC(count), macro, \
  7970. __VA_ARGS__) SSE2NEON_OBSTRUCT(macro)(SSE2NEON_DEC(count), \
  7971. __VA_ARGS__))
  7972. #define SSE2NEON_REPEAT_INDIRECT() SSE2NEON_REPEAT
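// For reference, a sketch of the expansion produced by the recursion above,
// where M and t stand in for an arbitrary macro and argument:
//
//   SSE2NEON_EVAL(SSE2NEON_REPEAT(3, M, t))  /* -> M(0, t) M(1, t) M(2, t) */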
  7973. #define SSE2NEON_SIZE_OF_byte 8
  7974. #define SSE2NEON_NUMBER_OF_LANES_byte 16
  7975. #define SSE2NEON_SIZE_OF_word 16
  7976. #define SSE2NEON_NUMBER_OF_LANES_word 8
  7977. #define SSE2NEON_COMPARE_EQUAL_THEN_FILL_LANE(i, type) \
  7978. mtx[i] = vreinterpretq_m128i_##type(vceqq_##type( \
  7979. vdupq_n_##type(vgetq_lane_##type(vreinterpretq_##type##_m128i(b), i)), \
  7980. vreinterpretq_##type##_m128i(a)));
  7981. #define SSE2NEON_FILL_LANE(i, type) \
  7982. vec_b[i] = \
  7983. vdupq_n_##type(vgetq_lane_##type(vreinterpretq_##type##_m128i(b), i));
  7984. #define PCMPSTR_RANGES(a, b, mtx, data_type_prefix, type_prefix, size, \
  7985. number_of_lanes, byte_or_word) \
  7986. do { \
  7987. SSE2NEON_CAT( \
  7988. data_type_prefix, \
  7989. SSE2NEON_CAT(size, \
  7990. SSE2NEON_CAT(x, SSE2NEON_CAT(number_of_lanes, _t)))) \
  7991. vec_b[number_of_lanes]; \
  7992. __m128i mask = SSE2NEON_IIF(byte_or_word)( \
  7993. vreinterpretq_m128i_u16(vdupq_n_u16(0xff)), \
  7994. vreinterpretq_m128i_u32(vdupq_n_u32(0xffff))); \
  7995. SSE2NEON_EVAL(SSE2NEON_REPEAT(number_of_lanes, SSE2NEON_FILL_LANE, \
  7996. SSE2NEON_CAT(type_prefix, size))) \
  7997. for (int i = 0; i < number_of_lanes; i++) { \
  7998. mtx[i] = SSE2NEON_CAT(vreinterpretq_m128i_u, \
  7999. size)(SSE2NEON_CAT(vbslq_u, size)( \
  8000. SSE2NEON_CAT(vreinterpretq_u, \
  8001. SSE2NEON_CAT(size, _m128i))(mask), \
  8002. SSE2NEON_CAT(vcgeq_, SSE2NEON_CAT(type_prefix, size))( \
  8003. vec_b[i], \
  8004. SSE2NEON_CAT( \
  8005. vreinterpretq_, \
  8006. SSE2NEON_CAT(type_prefix, \
  8007. SSE2NEON_CAT(size, _m128i(a))))), \
  8008. SSE2NEON_CAT(vcleq_, SSE2NEON_CAT(type_prefix, size))( \
  8009. vec_b[i], \
  8010. SSE2NEON_CAT( \
  8011. vreinterpretq_, \
  8012. SSE2NEON_CAT(type_prefix, \
  8013. SSE2NEON_CAT(size, _m128i(a))))))); \
  8014. } \
  8015. } while (0)
  8016. #define PCMPSTR_EQ(a, b, mtx, size, number_of_lanes) \
  8017. do { \
  8018. SSE2NEON_EVAL(SSE2NEON_REPEAT(number_of_lanes, \
  8019. SSE2NEON_COMPARE_EQUAL_THEN_FILL_LANE, \
  8020. SSE2NEON_CAT(u, size))) \
  8021. } while (0)
  8022. #define SSE2NEON_CMP_EQUAL_ANY_IMPL(type) \
  8023. static int _sse2neon_cmp_##type##_equal_any(__m128i a, int la, __m128i b, \
  8024. int lb) \
  8025. { \
  8026. __m128i mtx[16]; \
  8027. PCMPSTR_EQ(a, b, mtx, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
  8028. SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type)); \
  8029. return SSE2NEON_CAT( \
  8030. _sse2neon_aggregate_equal_any_, \
  8031. SSE2NEON_CAT( \
  8032. SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
  8033. SSE2NEON_CAT(x, SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, \
  8034. type))))(la, lb, mtx); \
  8035. }
  8036. #define SSE2NEON_CMP_RANGES_IMPL(type, data_type, us, byte_or_word) \
  8037. static int _sse2neon_cmp_##us##type##_ranges(__m128i a, int la, __m128i b, \
  8038. int lb) \
  8039. { \
  8040. __m128i mtx[16]; \
  8041. PCMPSTR_RANGES( \
  8042. a, b, mtx, data_type, us, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
  8043. SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type), byte_or_word); \
  8044. return SSE2NEON_CAT( \
  8045. _sse2neon_aggregate_ranges_, \
  8046. SSE2NEON_CAT( \
  8047. SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
  8048. SSE2NEON_CAT(x, SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, \
  8049. type))))(la, lb, mtx); \
  8050. }
  8051. #define SSE2NEON_CMP_EQUAL_ORDERED_IMPL(type) \
  8052. static int _sse2neon_cmp_##type##_equal_ordered(__m128i a, int la, \
  8053. __m128i b, int lb) \
  8054. { \
  8055. __m128i mtx[16]; \
  8056. PCMPSTR_EQ(a, b, mtx, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
  8057. SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type)); \
  8058. return SSE2NEON_CAT( \
  8059. _sse2neon_aggregate_equal_ordered_, \
  8060. SSE2NEON_CAT( \
  8061. SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
  8062. SSE2NEON_CAT(x, \
  8063. SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type))))( \
  8064. SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type), la, lb, mtx); \
  8065. }
  8066. static int _sse2neon_aggregate_equal_any_8x16(int la, int lb, __m128i mtx[16])
  8067. {
  8068. int res = 0;
  8069. int m = (1 << la) - 1;
  8070. uint8x8_t vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
  8071. uint8x8_t t_lo = vtst_u8(vdup_n_u8(m & 0xff), vec_mask);
  8072. uint8x8_t t_hi = vtst_u8(vdup_n_u8(m >> 8), vec_mask);
  8073. uint8x16_t vec = vcombine_u8(t_lo, t_hi);
  8074. for (int j = 0; j < lb; j++) {
  8075. mtx[j] = vreinterpretq_m128i_u8(
  8076. vandq_u8(vec, vreinterpretq_u8_m128i(mtx[j])));
  8077. mtx[j] = vreinterpretq_m128i_u8(
  8078. vshrq_n_u8(vreinterpretq_u8_m128i(mtx[j]), 7));
  8079. int tmp = _sse2neon_vaddvq_u8(vreinterpretq_u8_m128i(mtx[j])) ? 1 : 0;
  8080. res |= (tmp << j);
  8081. }
  8082. return res;
  8083. }
  8084. static int _sse2neon_aggregate_equal_any_16x8(int la, int lb, __m128i mtx[16])
  8085. {
  8086. int res = 0;
  8087. int m = (1 << la) - 1;
  8088. uint16x8_t vec =
  8089. vtstq_u16(vdupq_n_u16(m), vld1q_u16(_sse2neon_cmpestr_mask16b));
  8090. for (int j = 0; j < lb; j++) {
  8091. mtx[j] = vreinterpretq_m128i_u16(
  8092. vandq_u16(vec, vreinterpretq_u16_m128i(mtx[j])));
  8093. mtx[j] = vreinterpretq_m128i_u16(
  8094. vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 15));
  8095. int tmp = _sse2neon_vaddvq_u16(vreinterpretq_u16_m128i(mtx[j])) ? 1 : 0;
  8096. res |= (tmp << j);
  8097. }
  8098. return res;
  8099. }
  8100. /* clang-format off */
  8101. #define SSE2NEON_GENERATE_CMP_EQUAL_ANY(prefix) \
  8102. prefix##IMPL(byte) \
  8103. prefix##IMPL(word)
  8104. /* clang-format on */
  8105. SSE2NEON_GENERATE_CMP_EQUAL_ANY(SSE2NEON_CMP_EQUAL_ANY_)
  8106. static int _sse2neon_aggregate_ranges_16x8(int la, int lb, __m128i mtx[16])
  8107. {
  8108. int res = 0;
  8109. int m = (1 << la) - 1;
  8110. uint16x8_t vec =
  8111. vtstq_u16(vdupq_n_u16(m), vld1q_u16(_sse2neon_cmpestr_mask16b));
  8112. for (int j = 0; j < lb; j++) {
  8113. mtx[j] = vreinterpretq_m128i_u16(
  8114. vandq_u16(vec, vreinterpretq_u16_m128i(mtx[j])));
  8115. mtx[j] = vreinterpretq_m128i_u16(
  8116. vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 15));
  8117. __m128i tmp = vreinterpretq_m128i_u32(
  8118. vshrq_n_u32(vreinterpretq_u32_m128i(mtx[j]), 16));
  8119. uint32x4_t vec_res = vandq_u32(vreinterpretq_u32_m128i(mtx[j]),
  8120. vreinterpretq_u32_m128i(tmp));
  8121. #if defined(__aarch64__)
  8122. int t = vaddvq_u32(vec_res) ? 1 : 0;
  8123. #else
  8124. uint64x2_t sumh = vpaddlq_u32(vec_res);
  8125. int t = vgetq_lane_u64(sumh, 0) + vgetq_lane_u64(sumh, 1);
  8126. #endif
  8127. res |= (t << j);
  8128. }
  8129. return res;
  8130. }
  8131. static int _sse2neon_aggregate_ranges_8x16(int la, int lb, __m128i mtx[16])
  8132. {
  8133. int res = 0;
  8134. int m = (1 << la) - 1;
  8135. uint8x8_t vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
  8136. uint8x8_t t_lo = vtst_u8(vdup_n_u8(m & 0xff), vec_mask);
  8137. uint8x8_t t_hi = vtst_u8(vdup_n_u8(m >> 8), vec_mask);
  8138. uint8x16_t vec = vcombine_u8(t_lo, t_hi);
  8139. for (int j = 0; j < lb; j++) {
  8140. mtx[j] = vreinterpretq_m128i_u8(
  8141. vandq_u8(vec, vreinterpretq_u8_m128i(mtx[j])));
  8142. mtx[j] = vreinterpretq_m128i_u8(
  8143. vshrq_n_u8(vreinterpretq_u8_m128i(mtx[j]), 7));
  8144. __m128i tmp = vreinterpretq_m128i_u16(
  8145. vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 8));
  8146. uint16x8_t vec_res = vandq_u16(vreinterpretq_u16_m128i(mtx[j]),
  8147. vreinterpretq_u16_m128i(tmp));
  8148. int t = _sse2neon_vaddvq_u16(vec_res) ? 1 : 0;
  8149. res |= (t << j);
  8150. }
  8151. return res;
  8152. }
  8153. #define SSE2NEON_CMP_RANGES_IS_BYTE 1
  8154. #define SSE2NEON_CMP_RANGES_IS_WORD 0
  8155. /* clang-format off */
  8156. #define SSE2NEON_GENERATE_CMP_RANGES(prefix) \
  8157. prefix##IMPL(byte, uint, u, prefix##IS_BYTE) \
  8158. prefix##IMPL(byte, int, s, prefix##IS_BYTE) \
  8159. prefix##IMPL(word, uint, u, prefix##IS_WORD) \
  8160. prefix##IMPL(word, int, s, prefix##IS_WORD)
  8161. /* clang-format on */
  8162. SSE2NEON_GENERATE_CMP_RANGES(SSE2NEON_CMP_RANGES_)
  8163. #undef SSE2NEON_CMP_RANGES_IS_BYTE
  8164. #undef SSE2NEON_CMP_RANGES_IS_WORD
  8165. static int _sse2neon_cmp_byte_equal_each(__m128i a, int la, __m128i b, int lb)
  8166. {
  8167. uint8x16_t mtx =
  8168. vceqq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b));
  8169. int m0 = (la < lb) ? 0 : ((1 << la) - (1 << lb));
  8170. int m1 = 0x10000 - (1 << la);
  8171. int tb = 0x10000 - (1 << lb);
  8172. uint8x8_t vec_mask, vec0_lo, vec0_hi, vec1_lo, vec1_hi;
  8173. uint8x8_t tmp_lo, tmp_hi, res_lo, res_hi;
  8174. vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
  8175. vec0_lo = vtst_u8(vdup_n_u8(m0), vec_mask);
  8176. vec0_hi = vtst_u8(vdup_n_u8(m0 >> 8), vec_mask);
  8177. vec1_lo = vtst_u8(vdup_n_u8(m1), vec_mask);
  8178. vec1_hi = vtst_u8(vdup_n_u8(m1 >> 8), vec_mask);
  8179. tmp_lo = vtst_u8(vdup_n_u8(tb), vec_mask);
  8180. tmp_hi = vtst_u8(vdup_n_u8(tb >> 8), vec_mask);
  8181. res_lo = vbsl_u8(vec0_lo, vdup_n_u8(0), vget_low_u8(mtx));
  8182. res_hi = vbsl_u8(vec0_hi, vdup_n_u8(0), vget_high_u8(mtx));
  8183. res_lo = vbsl_u8(vec1_lo, tmp_lo, res_lo);
  8184. res_hi = vbsl_u8(vec1_hi, tmp_hi, res_hi);
  8185. res_lo = vand_u8(res_lo, vec_mask);
  8186. res_hi = vand_u8(res_hi, vec_mask);
  8187. int res = _sse2neon_vaddv_u8(res_lo) + (_sse2neon_vaddv_u8(res_hi) << 8);
  8188. return res;
  8189. }
  8190. static int _sse2neon_cmp_word_equal_each(__m128i a, int la, __m128i b, int lb)
  8191. {
  8192. uint16x8_t mtx =
  8193. vceqq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
  8194. int m0 = (la < lb) ? 0 : ((1 << la) - (1 << lb));
  8195. int m1 = 0x100 - (1 << la);
  8196. int tb = 0x100 - (1 << lb);
  8197. uint16x8_t vec_mask = vld1q_u16(_sse2neon_cmpestr_mask16b);
  8198. uint16x8_t vec0 = vtstq_u16(vdupq_n_u16(m0), vec_mask);
  8199. uint16x8_t vec1 = vtstq_u16(vdupq_n_u16(m1), vec_mask);
  8200. uint16x8_t tmp = vtstq_u16(vdupq_n_u16(tb), vec_mask);
  8201. mtx = vbslq_u16(vec0, vdupq_n_u16(0), mtx);
  8202. mtx = vbslq_u16(vec1, tmp, mtx);
  8203. mtx = vandq_u16(mtx, vec_mask);
  8204. return _sse2neon_vaddvq_u16(mtx);
  8205. }
  8206. #define SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UBYTE 1
  8207. #define SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UWORD 0
  8208. #define SSE2NEON_AGGREGATE_EQUAL_ORDER_IMPL(size, number_of_lanes, data_type) \
  8209. static int _sse2neon_aggregate_equal_ordered_##size##x##number_of_lanes( \
  8210. int bound, int la, int lb, __m128i mtx[16]) \
  8211. { \
  8212. int res = 0; \
  8213. int m1 = SSE2NEON_IIF(data_type)(0x10000, 0x100) - (1 << la); \
  8214. uint##size##x8_t vec_mask = SSE2NEON_IIF(data_type)( \
  8215. vld1_u##size(_sse2neon_cmpestr_mask##size##b), \
  8216. vld1q_u##size(_sse2neon_cmpestr_mask##size##b)); \
  8217. uint##size##x##number_of_lanes##_t vec1 = SSE2NEON_IIF(data_type)( \
  8218. vcombine_u##size(vtst_u##size(vdup_n_u##size(m1), vec_mask), \
  8219. vtst_u##size(vdup_n_u##size(m1 >> 8), vec_mask)), \
  8220. vtstq_u##size(vdupq_n_u##size(m1), vec_mask)); \
  8221. uint##size##x##number_of_lanes##_t vec_minusone = vdupq_n_u##size(-1); \
  8222. uint##size##x##number_of_lanes##_t vec_zero = vdupq_n_u##size(0); \
  8223. for (int j = 0; j < lb; j++) { \
  8224. mtx[j] = vreinterpretq_m128i_u##size(vbslq_u##size( \
  8225. vec1, vec_minusone, vreinterpretq_u##size##_m128i(mtx[j]))); \
  8226. } \
  8227. for (int j = lb; j < bound; j++) { \
  8228. mtx[j] = vreinterpretq_m128i_u##size( \
  8229. vbslq_u##size(vec1, vec_minusone, vec_zero)); \
  8230. } \
  8231. unsigned SSE2NEON_IIF(data_type)(char, short) *ptr = \
  8232. (unsigned SSE2NEON_IIF(data_type)(char, short) *) mtx; \
  8233. for (int i = 0; i < bound; i++) { \
  8234. int val = 1; \
  8235. for (int j = 0, k = i; j < bound - i && k < bound; j++, k++) \
  8236. val &= ptr[k * bound + j]; \
  8237. res += val << i; \
  8238. } \
  8239. return res; \
  8240. }
  8241. /* clang-format off */
  8242. #define SSE2NEON_GENERATE_AGGREGATE_EQUAL_ORDER(prefix) \
  8243. prefix##IMPL(8, 16, prefix##IS_UBYTE) \
  8244. prefix##IMPL(16, 8, prefix##IS_UWORD)
  8245. /* clang-format on */
  8246. SSE2NEON_GENERATE_AGGREGATE_EQUAL_ORDER(SSE2NEON_AGGREGATE_EQUAL_ORDER_)
  8247. #undef SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UBYTE
  8248. #undef SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UWORD
  8249. /* clang-format off */
  8250. #define SSE2NEON_GENERATE_CMP_EQUAL_ORDERED(prefix) \
  8251. prefix##IMPL(byte) \
  8252. prefix##IMPL(word)
  8253. /* clang-format on */
  8254. SSE2NEON_GENERATE_CMP_EQUAL_ORDERED(SSE2NEON_CMP_EQUAL_ORDERED_)
  8255. #define SSE2NEON_CMPESTR_LIST \
  8256. _(CMP_UBYTE_EQUAL_ANY, cmp_byte_equal_any) \
  8257. _(CMP_UWORD_EQUAL_ANY, cmp_word_equal_any) \
  8258. _(CMP_SBYTE_EQUAL_ANY, cmp_byte_equal_any) \
  8259. _(CMP_SWORD_EQUAL_ANY, cmp_word_equal_any) \
  8260. _(CMP_UBYTE_RANGES, cmp_ubyte_ranges) \
  8261. _(CMP_UWORD_RANGES, cmp_uword_ranges) \
  8262. _(CMP_SBYTE_RANGES, cmp_sbyte_ranges) \
  8263. _(CMP_SWORD_RANGES, cmp_sword_ranges) \
  8264. _(CMP_UBYTE_EQUAL_EACH, cmp_byte_equal_each) \
  8265. _(CMP_UWORD_EQUAL_EACH, cmp_word_equal_each) \
  8266. _(CMP_SBYTE_EQUAL_EACH, cmp_byte_equal_each) \
  8267. _(CMP_SWORD_EQUAL_EACH, cmp_word_equal_each) \
  8268. _(CMP_UBYTE_EQUAL_ORDERED, cmp_byte_equal_ordered) \
  8269. _(CMP_UWORD_EQUAL_ORDERED, cmp_word_equal_ordered) \
  8270. _(CMP_SBYTE_EQUAL_ORDERED, cmp_byte_equal_ordered) \
  8271. _(CMP_SWORD_EQUAL_ORDERED, cmp_word_equal_ordered)
  8272. enum {
  8273. #define _(name, func_suffix) name,
  8274. SSE2NEON_CMPESTR_LIST
  8275. #undef _
  8276. };
  8277. typedef int (*cmpestr_func_t)(__m128i a, int la, __m128i b, int lb);
  8278. static cmpestr_func_t _sse2neon_cmpfunc_table[] = {
  8279. #define _(name, func_suffix) _sse2neon_##func_suffix,
  8280. SSE2NEON_CMPESTR_LIST
  8281. #undef _
  8282. };
  8283. FORCE_INLINE int _sse2neon_sido_negative(int res, int lb, int imm8, int bound)
  8284. {
  8285. switch (imm8 & 0x30) {
  8286. case _SIDD_NEGATIVE_POLARITY:
  8287. res ^= 0xffffffff;
  8288. break;
  8289. case _SIDD_MASKED_NEGATIVE_POLARITY:
  8290. res ^= (1 << lb) - 1;
  8291. break;
  8292. default:
  8293. break;
  8294. }
  8295. return res & ((bound == 8) ? 0xFF : 0xFFFF);
  8296. }
FORCE_INLINE int _sse2neon_clz(unsigned int x)
{
#if _MSC_VER
DWORD cnt = 0;
if (_BitScanReverse(&cnt, x))
return 31 - cnt;
return 32;
#else
return x != 0 ? __builtin_clz(x) : 32;
#endif
}
FORCE_INLINE int _sse2neon_ctz(unsigned int x)
{
#if _MSC_VER
DWORD cnt = 0;
if (_BitScanForward(&cnt, x))
return cnt;
return 32;
#else
return x != 0 ? __builtin_ctz(x) : 32;
#endif
}
  8319. FORCE_INLINE int _sse2neon_ctzll(unsigned long long x)
  8320. {
  8321. #if _MSC_VER
  8322. unsigned long cnt;
#if defined(SSE2NEON_HAS_BITSCAN64) && \
    (defined(_M_AMD64) || defined(__x86_64__))
if (_BitScanForward64(&cnt, x))
return (int) cnt;
  8327. #else
  8328. if (_BitScanForward(&cnt, (unsigned long) (x)))
  8329. return (int) cnt;
  8330. if (_BitScanForward(&cnt, (unsigned long) (x >> 32)))
  8331. return (int) (cnt + 32);
  8332. #endif
  8333. return 64;
  8334. #else
  8335. return x != 0 ? __builtin_ctzll(x) : 64;
  8336. #endif
  8337. }
#define SSE2NEON_MIN(x, y) ((x) < (y) ? (x) : (y))
  8339. #define SSE2NEON_CMPSTR_SET_UPPER(var, imm) \
  8340. const int var = (imm & 0x01) ? 8 : 16
  8341. #define SSE2NEON_CMPESTRX_LEN_PAIR(a, b, la, lb) \
  8342. int tmp1 = la ^ (la >> 31); \
  8343. la = tmp1 - (la >> 31); \
  8344. int tmp2 = lb ^ (lb >> 31); \
  8345. lb = tmp2 - (lb >> 31); \
  8346. la = SSE2NEON_MIN(la, bound); \
  8347. lb = SSE2NEON_MIN(lb, bound)
// Compare all pairs of characters in strings a and b,
// then aggregate the result.
// As the only difference between PCMPESTR* and PCMPISTR* is the way the
// string lengths are calculated, we use SSE2NEON_CMP{I,E}STRX_LEN_PAIR to get
// the lengths of strings a and b.
  8353. #define SSE2NEON_COMP_AGG(a, b, la, lb, imm8, IE) \
  8354. SSE2NEON_CMPSTR_SET_UPPER(bound, imm8); \
  8355. SSE2NEON_##IE##_LEN_PAIR(a, b, la, lb); \
  8356. int r2 = (_sse2neon_cmpfunc_table[imm8 & 0x0f])(a, la, b, lb); \
  8357. r2 = _sse2neon_sido_negative(r2, lb, imm8, bound)
  8358. #define SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8) \
  8359. return (r2 == 0) ? bound \
  8360. : ((imm8 & 0x40) ? (31 - _sse2neon_clz(r2)) \
  8361. : _sse2neon_ctz(r2))
  8362. #define SSE2NEON_CMPSTR_GENERATE_MASK(dst) \
  8363. __m128i dst = vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
  8364. if (imm8 & 0x40) { \
  8365. if (bound == 8) { \
  8366. uint16x8_t tmp = vtstq_u16(vdupq_n_u16(r2), \
  8367. vld1q_u16(_sse2neon_cmpestr_mask16b)); \
  8368. dst = vreinterpretq_m128i_u16(vbslq_u16( \
  8369. tmp, vdupq_n_u16(-1), vreinterpretq_u16_m128i(dst))); \
  8370. } else { \
  8371. uint8x16_t vec_r2 = \
  8372. vcombine_u8(vdup_n_u8(r2), vdup_n_u8(r2 >> 8)); \
  8373. uint8x16_t tmp = \
  8374. vtstq_u8(vec_r2, vld1q_u8(_sse2neon_cmpestr_mask8b)); \
  8375. dst = vreinterpretq_m128i_u8( \
  8376. vbslq_u8(tmp, vdupq_n_u8(-1), vreinterpretq_u8_m128i(dst))); \
  8377. } \
  8378. } else { \
  8379. if (bound == 16) { \
  8380. dst = vreinterpretq_m128i_u16( \
  8381. vsetq_lane_u16(r2 & 0xffff, vreinterpretq_u16_m128i(dst), 0)); \
  8382. } else { \
  8383. dst = vreinterpretq_m128i_u8( \
  8384. vsetq_lane_u8(r2 & 0xff, vreinterpretq_u8_m128i(dst), 0)); \
  8385. } \
  8386. } \
  8387. return dst
  8388. // Compare packed strings in a and b with lengths la and lb using the control
  8389. // in imm8, and returns 1 if b did not contain a null character and the
  8390. // resulting mask was zero, and 0 otherwise.
  8391. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestra
  8392. FORCE_INLINE int _mm_cmpestra(__m128i a,
  8393. int la,
  8394. __m128i b,
  8395. int lb,
  8396. const int imm8)
  8397. {
  8398. int lb_cpy = lb;
  8399. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
  8400. return !r2 & (lb_cpy > bound);
  8401. }
  8402. // Compare packed strings in a and b with lengths la and lb using the control in
  8403. // imm8, and returns 1 if the resulting mask was non-zero, and 0 otherwise.
  8404. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrc
  8405. FORCE_INLINE int _mm_cmpestrc(__m128i a,
  8406. int la,
  8407. __m128i b,
  8408. int lb,
  8409. const int imm8)
  8410. {
  8411. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
  8412. return r2 != 0;
  8413. }
  8414. // Compare packed strings in a and b with lengths la and lb using the control
  8415. // in imm8, and store the generated index in dst.
  8416. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestri
  8417. FORCE_INLINE int _mm_cmpestri(__m128i a,
  8418. int la,
  8419. __m128i b,
  8420. int lb,
  8421. const int imm8)
  8422. {
  8423. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
  8424. SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8);
  8425. }
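// A usage sketch with explicit lengths (buffer contents are hypothetical):
// locate the first occurrence of a character in a 16-byte block.
//
//   __m128i hay = _mm_loadu_si128((const __m128i *) "abcdefgh01234567");
//   __m128i chr = _mm_cvtsi32_si128('e');
//   // idx == 4; a miss would return the bound (16 for byte data)
//   int idx = _mm_cmpestri(chr, 1, hay, 16,
//                          _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);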
  8426. // Compare packed strings in a and b with lengths la and lb using the control
  8427. // in imm8, and store the generated mask in dst.
  8428. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrm
  8429. FORCE_INLINE __m128i
  8430. _mm_cmpestrm(__m128i a, int la, __m128i b, int lb, const int imm8)
  8431. {
  8432. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
  8433. SSE2NEON_CMPSTR_GENERATE_MASK(dst);
  8434. }
  8435. // Compare packed strings in a and b with lengths la and lb using the control in
  8436. // imm8, and returns bit 0 of the resulting bit mask.
  8437. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestro
  8438. FORCE_INLINE int _mm_cmpestro(__m128i a,
  8439. int la,
  8440. __m128i b,
  8441. int lb,
  8442. const int imm8)
  8443. {
  8444. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
  8445. return r2 & 1;
  8446. }
  8447. // Compare packed strings in a and b with lengths la and lb using the control in
  8448. // imm8, and returns 1 if any character in a was null, and 0 otherwise.
  8449. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrs
  8450. FORCE_INLINE int _mm_cmpestrs(__m128i a,
  8451. int la,
  8452. __m128i b,
  8453. int lb,
  8454. const int imm8)
  8455. {
  8456. SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
  8457. return la <= (bound - 1);
  8458. }
  8459. // Compare packed strings in a and b with lengths la and lb using the control in
  8460. // imm8, and returns 1 if any character in b was null, and 0 otherwise.
  8461. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrz
  8462. FORCE_INLINE int _mm_cmpestrz(__m128i a,
  8463. int la,
  8464. __m128i b,
  8465. int lb,
  8466. const int imm8)
  8467. {
  8468. SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
  8469. return lb <= (bound - 1);
  8470. }
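// The implicit-length variants below first locate the terminating null: each
// lane is compared against zero and the 128-bit comparison mask is narrowed
// with a 4-bit shift (vshrn_n_u16), packing it into a 64-bit value with four
// mask bits per byte; counting the trailing zeros of that value then gives
// the index of the first null character.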
  8471. #define SSE2NEON_CMPISTRX_LENGTH(str, len, imm8) \
  8472. do { \
  8473. if (imm8 & 0x01) { \
  8474. uint16x8_t equal_mask_##str = \
  8475. vceqq_u16(vreinterpretq_u16_m128i(str), vdupq_n_u16(0)); \
  8476. uint8x8_t res_##str = vshrn_n_u16(equal_mask_##str, 4); \
  8477. uint64_t matches_##str = \
  8478. vget_lane_u64(vreinterpret_u64_u8(res_##str), 0); \
  8479. len = _sse2neon_ctzll(matches_##str) >> 3; \
  8480. } else { \
  8481. uint16x8_t equal_mask_##str = vreinterpretq_u16_u8( \
  8482. vceqq_u8(vreinterpretq_u8_m128i(str), vdupq_n_u8(0))); \
  8483. uint8x8_t res_##str = vshrn_n_u16(equal_mask_##str, 4); \
  8484. uint64_t matches_##str = \
  8485. vget_lane_u64(vreinterpret_u64_u8(res_##str), 0); \
  8486. len = _sse2neon_ctzll(matches_##str) >> 2; \
  8487. } \
  8488. } while (0)
  8489. #define SSE2NEON_CMPISTRX_LEN_PAIR(a, b, la, lb) \
  8490. int la, lb; \
  8491. do { \
  8492. SSE2NEON_CMPISTRX_LENGTH(a, la, imm8); \
  8493. SSE2NEON_CMPISTRX_LENGTH(b, lb, imm8); \
  8494. } while (0)
  8495. // Compare packed strings with implicit lengths in a and b using the control in
  8496. // imm8, and returns 1 if b did not contain a null character and the resulting
  8497. // mask was zero, and 0 otherwise.
  8498. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistra
  8499. FORCE_INLINE int _mm_cmpistra(__m128i a, __m128i b, const int imm8)
  8500. {
  8501. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
  8502. return !r2 & (lb >= bound);
  8503. }
  8504. // Compare packed strings with implicit lengths in a and b using the control in
  8505. // imm8, and returns 1 if the resulting mask was non-zero, and 0 otherwise.
  8506. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrc
  8507. FORCE_INLINE int _mm_cmpistrc(__m128i a, __m128i b, const int imm8)
  8508. {
  8509. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
  8510. return r2 != 0;
  8511. }
  8512. // Compare packed strings with implicit lengths in a and b using the control in
  8513. // imm8, and store the generated index in dst.
  8514. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistri
  8515. FORCE_INLINE int _mm_cmpistri(__m128i a, __m128i b, const int imm8)
  8516. {
  8517. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
  8518. SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8);
  8519. }
  8520. // Compare packed strings with implicit lengths in a and b using the control in
  8521. // imm8, and store the generated mask in dst.
  8522. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrm
  8523. FORCE_INLINE __m128i _mm_cmpistrm(__m128i a, __m128i b, const int imm8)
  8524. {
  8525. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
  8526. SSE2NEON_CMPSTR_GENERATE_MASK(dst);
  8527. }
  8528. // Compare packed strings with implicit lengths in a and b using the control in
  8529. // imm8, and returns bit 0 of the resulting bit mask.
  8530. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistro
  8531. FORCE_INLINE int _mm_cmpistro(__m128i a, __m128i b, const int imm8)
  8532. {
  8533. SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
  8534. return r2 & 1;
  8535. }
  8536. // Compare packed strings with implicit lengths in a and b using the control in
  8537. // imm8, and returns 1 if any character in a was null, and 0 otherwise.
  8538. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrs
  8539. FORCE_INLINE int _mm_cmpistrs(__m128i a, __m128i b, const int imm8)
  8540. {
  8541. SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
  8542. int la;
  8543. SSE2NEON_CMPISTRX_LENGTH(a, la, imm8);
  8544. return la <= (bound - 1);
  8545. }
  8546. // Compare packed strings with implicit lengths in a and b using the control in
  8547. // imm8, and returns 1 if any character in b was null, and 0 otherwise.
  8548. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrz
  8549. FORCE_INLINE int _mm_cmpistrz(__m128i a, __m128i b, const int imm8)
  8550. {
  8551. SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
  8552. int lb;
  8553. SSE2NEON_CMPISTRX_LENGTH(b, lb, imm8);
  8554. return lb <= (bound - 1);
  8555. }
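// A usage sketch with implicit lengths (string contents are hypothetical):
// index of the first position at which two NUL-terminated strings differ.
//
//   __m128i s1 = _mm_loadu_si128((const __m128i *) "hello world!!!!");
//   __m128i s2 = _mm_loadu_si128((const __m128i *) "hello there!!!!");
//   // idx == 6 ('w' vs 't'); equal strings would return the bound (16)
//   int idx = _mm_cmpistri(s1, s2,
//                          _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH |
//                              _SIDD_NEGATIVE_POLARITY);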
  8556. // Compares the 2 signed 64-bit integers in a and the 2 signed 64-bit integers
  8557. // in b for greater than.
  8558. FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b)
  8559. {
  8560. #if defined(__aarch64__)
  8561. return vreinterpretq_m128i_u64(
  8562. vcgtq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
  8563. #else
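// ARMv7-A has no 64-bit compare instruction, so a saturating subtract is
// used instead: saturate(b - a) is negative exactly when a > b, and the
// saturation keeps the sign correct even if the true difference would
// overflow int64_t. The arithmetic shift by 63 then smears the sign bit
// into an all-ones (true) or all-zeros (false) lane mask.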
  8564. return vreinterpretq_m128i_s64(vshrq_n_s64(
  8565. vqsubq_s64(vreinterpretq_s64_m128i(b), vreinterpretq_s64_m128i(a)),
  8566. 63));
  8567. #endif
  8568. }
  8569. // Starting with the initial value in crc, accumulates a CRC32 value for
  8570. // unsigned 16-bit integer v.
  8571. // https://msdn.microsoft.com/en-us/library/bb531411(v=vs.100)
  8572. FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v)
  8573. {
  8574. #if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
  8575. __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t"
  8576. : [c] "+r"(crc)
  8577. : [v] "r"(v));
  8578. #elif (__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)
  8579. crc = __crc32ch(crc, v);
  8580. #else
  8581. crc = _mm_crc32_u8(crc, v & 0xff);
  8582. crc = _mm_crc32_u8(crc, (v >> 8) & 0xff);
  8583. #endif
  8584. return crc;
  8585. }
  8586. // Starting with the initial value in crc, accumulates a CRC32 value for
  8587. // unsigned 32-bit integer v.
  8588. // https://msdn.microsoft.com/en-us/library/bb531394(v=vs.100)
  8589. FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v)
  8590. {
  8591. #if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
  8592. __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t"
  8593. : [c] "+r"(crc)
  8594. : [v] "r"(v));
  8595. #elif (__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)
  8596. crc = __crc32cw(crc, v);
  8597. #else
  8598. crc = _mm_crc32_u16(crc, v & 0xffff);
  8599. crc = _mm_crc32_u16(crc, (v >> 16) & 0xffff);
  8600. #endif
  8601. return crc;
  8602. }
  8603. // Starting with the initial value in crc, accumulates a CRC32 value for
  8604. // unsigned 64-bit integer v.
  8605. // https://msdn.microsoft.com/en-us/library/bb514033(v=vs.100)
  8606. FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v)
  8607. {
  8608. #if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
  8609. __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t"
  8610. : [c] "+r"(crc)
  8611. : [v] "r"(v));
  8612. #else
  8613. crc = _mm_crc32_u32((uint32_t) (crc), v & 0xffffffff);
  8614. crc = _mm_crc32_u32((uint32_t) (crc), (v >> 32) & 0xffffffff);
  8615. #endif
  8616. return crc;
  8617. }
  8618. // Starting with the initial value in crc, accumulates a CRC32 value for
  8619. // unsigned 8-bit integer v.
  8620. // https://msdn.microsoft.com/en-us/library/bb514036(v=vs.100)
  8621. FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v)
  8622. {
  8623. #if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
  8624. __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t"
  8625. : [c] "+r"(crc)
  8626. : [v] "r"(v));
  8627. #elif (__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)
  8628. crc = __crc32cb(crc, v);
  8629. #else
  8630. crc ^= v;
  8631. for (int bit = 0; bit < 8; bit++) {
  8632. if (crc & 1)
  8633. crc = (crc >> 1) ^ UINT32_C(0x82f63b78);
  8634. else
  8635. crc = (crc >> 1);
  8636. }
  8637. #endif
  8638. return crc;
  8639. }
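// A usage sketch: accumulating a CRC-32C checksum over a byte buffer (buf and
// len are placeholders; the initial value and final inversion follow the
// usual CRC-32C convention and are up to the caller).
//
//   uint32_t crc = ~UINT32_C(0);
//   for (size_t i = 0; i < len; i++)
//       crc = _mm_crc32_u8(crc, buf[i]);
//   crc = ~crc;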
  8640. /* AES */
  8641. #if !defined(__ARM_FEATURE_CRYPTO)
  8642. /* clang-format off */
  8643. #define SSE2NEON_AES_SBOX(w) \
  8644. { \
  8645. w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), \
  8646. w(0xc5), w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), \
  8647. w(0xab), w(0x76), w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), \
  8648. w(0x59), w(0x47), w(0xf0), w(0xad), w(0xd4), w(0xa2), w(0xaf), \
  8649. w(0x9c), w(0xa4), w(0x72), w(0xc0), w(0xb7), w(0xfd), w(0x93), \
  8650. w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc), w(0x34), w(0xa5), \
  8651. w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15), w(0x04), \
  8652. w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a), \
  8653. w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), \
  8654. w(0x75), w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), \
  8655. w(0x5a), w(0xa0), w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), \
  8656. w(0xe3), w(0x2f), w(0x84), w(0x53), w(0xd1), w(0x00), w(0xed), \
  8657. w(0x20), w(0xfc), w(0xb1), w(0x5b), w(0x6a), w(0xcb), w(0xbe), \
  8658. w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf), w(0xd0), w(0xef), \
  8659. w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85), w(0x45), \
  8660. w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8), \
  8661. w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), \
  8662. w(0xf5), w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), \
  8663. w(0xf3), w(0xd2), w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), \
  8664. w(0x97), w(0x44), w(0x17), w(0xc4), w(0xa7), w(0x7e), w(0x3d), \
  8665. w(0x64), w(0x5d), w(0x19), w(0x73), w(0x60), w(0x81), w(0x4f), \
  8666. w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88), w(0x46), w(0xee), \
  8667. w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb), w(0xe0), \
  8668. w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c), \
  8669. w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), \
  8670. w(0x79), w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), \
  8671. w(0x4e), w(0xa9), w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), \
  8672. w(0x7a), w(0xae), w(0x08), w(0xba), w(0x78), w(0x25), w(0x2e), \
  8673. w(0x1c), w(0xa6), w(0xb4), w(0xc6), w(0xe8), w(0xdd), w(0x74), \
  8674. w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a), w(0x70), w(0x3e), \
  8675. w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e), w(0x61), \
  8676. w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e), \
  8677. w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), \
  8678. w(0x94), w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), \
  8679. w(0x28), w(0xdf), w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), \
  8680. w(0xe6), w(0x42), w(0x68), w(0x41), w(0x99), w(0x2d), w(0x0f), \
  8681. w(0xb0), w(0x54), w(0xbb), w(0x16) \
  8682. }
  8683. #define SSE2NEON_AES_RSBOX(w) \
  8684. { \
  8685. w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), \
  8686. w(0x38), w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), \
  8687. w(0xd7), w(0xfb), w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), \
  8688. w(0x2f), w(0xff), w(0x87), w(0x34), w(0x8e), w(0x43), w(0x44), \
  8689. w(0xc4), w(0xde), w(0xe9), w(0xcb), w(0x54), w(0x7b), w(0x94), \
  8690. w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d), w(0xee), w(0x4c), \
  8691. w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e), w(0x08), \
  8692. w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2), \
  8693. w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), \
  8694. w(0x25), w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), \
  8695. w(0x98), w(0x16), w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), \
  8696. w(0x65), w(0xb6), w(0x92), w(0x6c), w(0x70), w(0x48), w(0x50), \
  8697. w(0xfd), w(0xed), w(0xb9), w(0xda), w(0x5e), w(0x15), w(0x46), \
  8698. w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84), w(0x90), w(0xd8), \
  8699. w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a), w(0xf7), \
  8700. w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06), \
  8701. w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), \
  8702. w(0x02), w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), \
  8703. w(0x8a), w(0x6b), w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), \
  8704. w(0x67), w(0xdc), w(0xea), w(0x97), w(0xf2), w(0xcf), w(0xce), \
  8705. w(0xf0), w(0xb4), w(0xe6), w(0x73), w(0x96), w(0xac), w(0x74), \
  8706. w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85), w(0xe2), w(0xf9), \
  8707. w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e), w(0x47), \
  8708. w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89), \
  8709. w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), \
  8710. w(0x1b), w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), \
  8711. w(0x79), w(0x20), w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), \
  8712. w(0xcd), w(0x5a), w(0xf4), w(0x1f), w(0xdd), w(0xa8), w(0x33), \
  8713. w(0x88), w(0x07), w(0xc7), w(0x31), w(0xb1), w(0x12), w(0x10), \
  8714. w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f), w(0x60), w(0x51), \
  8715. w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d), w(0x2d), \
  8716. w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef), \
  8717. w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), \
  8718. w(0xb0), w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), \
  8719. w(0x99), w(0x61), w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), \
  8720. w(0x77), w(0xd6), w(0x26), w(0xe1), w(0x69), w(0x14), w(0x63), \
  8721. w(0x55), w(0x21), w(0x0c), w(0x7d) \
  8722. }
  8723. /* clang-format on */
  8724. /* X Macro trick. See https://en.wikipedia.org/wiki/X_Macro */
  8725. #define SSE2NEON_AES_H0(x) (x)
  8726. static const uint8_t _sse2neon_sbox[256] = SSE2NEON_AES_SBOX(SSE2NEON_AES_H0);
  8727. static const uint8_t _sse2neon_rsbox[256] = SSE2NEON_AES_RSBOX(SSE2NEON_AES_H0);
  8728. #undef SSE2NEON_AES_H0
  8729. /* x_time function and matrix multiply function */
  8730. #if !defined(__aarch64__)
  8731. #define SSE2NEON_XT(x) (((x) << 1) ^ ((((x) >> 7) & 1) * 0x1b))
  8732. #define SSE2NEON_MULTIPLY(x, y) \
  8733. (((y & 1) * x) ^ ((y >> 1 & 1) * SSE2NEON_XT(x)) ^ \
  8734. ((y >> 2 & 1) * SSE2NEON_XT(SSE2NEON_XT(x))) ^ \
  8735. ((y >> 3 & 1) * SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(x)))) ^ \
  8736. ((y >> 4 & 1) * SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(x))))))
  8737. #endif
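// A quick sanity check for the helpers above, using the {57} x {13} example
// from FIPS-197: (uint8_t) SSE2NEON_XT(0x57) == 0xae (i.e. {57} * {02}) and
// (uint8_t) SSE2NEON_MULTIPLY(0x57, 0x13) == 0xfe. The macros may leave
// stray bits above bit 7; the callers below always assign the result to a
// uint8_t, which performs the final truncation.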
  8738. // In the absence of crypto extensions, implement aesenc using regular neon
  8739. // intrinsics instead. See:
  8740. // https://www.workofard.com/2017/01/accelerated-aes-for-the-arm64-linux-kernel/
  8741. // https://www.workofard.com/2017/07/ghash-for-low-end-cores/ and
  8742. // https://github.com/ColinIanKing/linux-next-mirror/blob/b5f466091e130caaf0735976648f72bd5e09aa84/crypto/aegis128-neon-inner.c#L52
// for more information. Reproduced with permission of the author.
  8744. FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i RoundKey)
  8745. {
  8746. #if defined(__aarch64__)
  8747. static const uint8_t shift_rows[] = {
  8748. 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
  8749. 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
  8750. };
  8751. static const uint8_t ror32by8[] = {
  8752. 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
  8753. 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
  8754. };
  8755. uint8x16_t v;
  8756. uint8x16_t w = vreinterpretq_u8_m128i(a);
  8757. /* shift rows */
  8758. w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
  8759. /* sub bytes */
// Here, the whole 256-byte table is split into four 64-byte tables, which
// are looked up one after another. After each lookup, the next table
// (located 64 bytes further on) is loaded. Since each table covers a higher
// range of indices, the index passed to `vqtbx4q_u8()` has to be lowered by
// the offset at which that table starts within the full S-box.
v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), w);
// 'w - 0x40' is equivalent to 'vsubq_u8(w, vdupq_n_u8(0x40))'
  8767. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), w - 0x40);
  8768. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), w - 0x80);
  8769. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), w - 0xc0);
  8770. /* mix columns */
  8771. w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
  8772. w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
  8773. w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
  8774. /* add round key */
  8775. return vreinterpretq_m128i_u8(w) ^ RoundKey;
  8776. #else /* ARMv7-A implementation for a table-based AES */
  8777. #define SSE2NEON_AES_B2W(b0, b1, b2, b3) \
  8778. (((uint32_t) (b3) << 24) | ((uint32_t) (b2) << 16) | \
  8779. ((uint32_t) (b1) << 8) | (uint32_t) (b0))
// multiplying 'x' by 2 in GF(2^8)
#define SSE2NEON_AES_F2(x) ((x << 1) ^ (((x >> 7) & 1) * 0x011b /* WPOLY */))
// multiplying 'x' by 3 in GF(2^8)
  8783. #define SSE2NEON_AES_F3(x) (SSE2NEON_AES_F2(x) ^ x)
  8784. #define SSE2NEON_AES_U0(p) \
  8785. SSE2NEON_AES_B2W(SSE2NEON_AES_F2(p), p, p, SSE2NEON_AES_F3(p))
  8786. #define SSE2NEON_AES_U1(p) \
  8787. SSE2NEON_AES_B2W(SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p, p)
  8788. #define SSE2NEON_AES_U2(p) \
  8789. SSE2NEON_AES_B2W(p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p)
  8790. #define SSE2NEON_AES_U3(p) \
  8791. SSE2NEON_AES_B2W(p, p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p))
  8792. // this generates a table containing every possible permutation of
  8793. // shift_rows() and sub_bytes() with mix_columns().
  8794. static const uint32_t ALIGN_STRUCT(16) aes_table[4][256] = {
  8795. SSE2NEON_AES_SBOX(SSE2NEON_AES_U0),
  8796. SSE2NEON_AES_SBOX(SSE2NEON_AES_U1),
  8797. SSE2NEON_AES_SBOX(SSE2NEON_AES_U2),
  8798. SSE2NEON_AES_SBOX(SSE2NEON_AES_U3),
  8799. };
  8800. #undef SSE2NEON_AES_B2W
  8801. #undef SSE2NEON_AES_F2
  8802. #undef SSE2NEON_AES_F3
  8803. #undef SSE2NEON_AES_U0
  8804. #undef SSE2NEON_AES_U1
  8805. #undef SSE2NEON_AES_U2
  8806. #undef SSE2NEON_AES_U3
  8807. uint32_t x0 = _mm_cvtsi128_si32(a); // get a[31:0]
  8808. uint32_t x1 =
  8809. _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0x55)); // get a[63:32]
  8810. uint32_t x2 =
  8811. _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xAA)); // get a[95:64]
  8812. uint32_t x3 =
  8813. _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xFF)); // get a[127:96]
  8814. // finish the modulo addition step in mix_columns()
  8815. __m128i out = _mm_set_epi32(
  8816. (aes_table[0][x3 & 0xff] ^ aes_table[1][(x0 >> 8) & 0xff] ^
  8817. aes_table[2][(x1 >> 16) & 0xff] ^ aes_table[3][x2 >> 24]),
  8818. (aes_table[0][x2 & 0xff] ^ aes_table[1][(x3 >> 8) & 0xff] ^
  8819. aes_table[2][(x0 >> 16) & 0xff] ^ aes_table[3][x1 >> 24]),
  8820. (aes_table[0][x1 & 0xff] ^ aes_table[1][(x2 >> 8) & 0xff] ^
  8821. aes_table[2][(x3 >> 16) & 0xff] ^ aes_table[3][x0 >> 24]),
  8822. (aes_table[0][x0 & 0xff] ^ aes_table[1][(x1 >> 8) & 0xff] ^
  8823. aes_table[2][(x2 >> 16) & 0xff] ^ aes_table[3][x3 >> 24]));
  8824. return _mm_xor_si128(out, RoundKey);
  8825. #endif
  8826. }
  8827. // Perform one round of an AES decryption flow on data (state) in a using the
  8828. // round key in RoundKey, and store the result in dst.
  8829. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdec_si128
  8830. FORCE_INLINE __m128i _mm_aesdec_si128(__m128i a, __m128i RoundKey)
  8831. {
  8832. #if defined(__aarch64__)
  8833. static const uint8_t inv_shift_rows[] = {
  8834. 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb,
  8835. 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3,
  8836. };
  8837. static const uint8_t ror32by8[] = {
  8838. 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
  8839. 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
  8840. };
  8841. uint8x16_t v;
  8842. uint8x16_t w = vreinterpretq_u8_m128i(a);
  8843. // inverse shift rows
  8844. w = vqtbl1q_u8(w, vld1q_u8(inv_shift_rows));
  8845. // inverse sub bytes
  8846. v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_rsbox), w);
  8847. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x40), w - 0x40);
  8848. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x80), w - 0x80);
  8849. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0xc0), w - 0xc0);
  8850. // inverse mix columns
// multiplying 'v' by 4 in GF(2^8)
  8852. w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
  8853. w = (w << 1) ^ (uint8x16_t) (((int8x16_t) w >> 7) & 0x1b);
  8854. v ^= w;
  8855. v ^= (uint8x16_t) vrev32q_u16((uint16x8_t) w);
  8856. w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) &
0x1b); // multiplying 'v' by 2 in GF(2^8)
  8858. w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
  8859. w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
  8860. // add round key
  8861. return vreinterpretq_m128i_u8(w) ^ RoundKey;
  8862. #else /* ARMv7-A NEON implementation */
/* FIXME: optimize for NEON */
  8864. uint8_t i, e, f, g, h, v[4][4];
  8865. uint8_t *_a = (uint8_t *) &a;
  8866. for (i = 0; i < 16; ++i) {
  8867. v[((i / 4) + (i % 4)) % 4][i % 4] = _sse2neon_rsbox[_a[i]];
  8868. }
  8869. // inverse mix columns
  8870. for (i = 0; i < 4; ++i) {
  8871. e = v[i][0];
  8872. f = v[i][1];
  8873. g = v[i][2];
  8874. h = v[i][3];
  8875. v[i][0] = SSE2NEON_MULTIPLY(e, 0x0e) ^ SSE2NEON_MULTIPLY(f, 0x0b) ^
  8876. SSE2NEON_MULTIPLY(g, 0x0d) ^ SSE2NEON_MULTIPLY(h, 0x09);
  8877. v[i][1] = SSE2NEON_MULTIPLY(e, 0x09) ^ SSE2NEON_MULTIPLY(f, 0x0e) ^
  8878. SSE2NEON_MULTIPLY(g, 0x0b) ^ SSE2NEON_MULTIPLY(h, 0x0d);
  8879. v[i][2] = SSE2NEON_MULTIPLY(e, 0x0d) ^ SSE2NEON_MULTIPLY(f, 0x09) ^
  8880. SSE2NEON_MULTIPLY(g, 0x0e) ^ SSE2NEON_MULTIPLY(h, 0x0b);
  8881. v[i][3] = SSE2NEON_MULTIPLY(e, 0x0b) ^ SSE2NEON_MULTIPLY(f, 0x0d) ^
  8882. SSE2NEON_MULTIPLY(g, 0x09) ^ SSE2NEON_MULTIPLY(h, 0x0e);
  8883. }
  8884. return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v)) ^ RoundKey;
  8885. #endif
  8886. }
  8887. // Perform the last round of an AES encryption flow on data (state) in a using
  8888. // the round key in RoundKey, and store the result in dst.
  8889. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenclast_si128
  8890. FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
  8891. {
  8892. #if defined(__aarch64__)
  8893. static const uint8_t shift_rows[] = {
  8894. 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
  8895. 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
  8896. };
  8897. uint8x16_t v;
  8898. uint8x16_t w = vreinterpretq_u8_m128i(a);
  8899. // shift rows
  8900. w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
  8901. // sub bytes
  8902. v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), w);
  8903. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), w - 0x40);
  8904. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), w - 0x80);
  8905. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), w - 0xc0);
  8906. // add round key
  8907. return vreinterpretq_m128i_u8(v) ^ RoundKey;
  8908. #else /* ARMv7-A implementation */
  8909. uint8_t v[16] = {
  8910. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 0)],
  8911. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 5)],
  8912. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 10)],
  8913. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 15)],
  8914. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 4)],
  8915. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 9)],
  8916. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 14)],
  8917. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 3)],
  8918. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 8)],
  8919. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 13)],
  8920. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 2)],
  8921. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 7)],
  8922. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 12)],
  8923. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 1)],
  8924. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 6)],
  8925. _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 11)],
  8926. };
  8927. return vreinterpretq_m128i_u8(vld1q_u8(v)) ^ RoundKey;
  8928. #endif
  8929. }
  8930. // Perform the last round of an AES decryption flow on data (state) in a using
  8931. // the round key in RoundKey, and store the result in dst.
  8932. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdeclast_si128
  8933. FORCE_INLINE __m128i _mm_aesdeclast_si128(__m128i a, __m128i RoundKey)
  8934. {
  8935. #if defined(__aarch64__)
  8936. static const uint8_t inv_shift_rows[] = {
  8937. 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb,
  8938. 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3,
  8939. };
  8940. uint8x16_t v;
  8941. uint8x16_t w = vreinterpretq_u8_m128i(a);
  8942. // inverse shift rows
  8943. w = vqtbl1q_u8(w, vld1q_u8(inv_shift_rows));
  8944. // inverse sub bytes
  8945. v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_rsbox), w);
  8946. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x40), w - 0x40);
  8947. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x80), w - 0x80);
  8948. v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0xc0), w - 0xc0);
  8949. // add round key
  8950. return vreinterpretq_m128i_u8(v) ^ RoundKey;
  8951. #else /* ARMv7-A NEON implementation */
/* FIXME: optimize for NEON */
  8953. uint8_t v[4][4];
  8954. uint8_t *_a = (uint8_t *) &a;
  8955. for (int i = 0; i < 16; ++i) {
  8956. v[((i / 4) + (i % 4)) % 4][i % 4] = _sse2neon_rsbox[_a[i]];
  8957. }
  8958. return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v)) ^ RoundKey;
  8959. #endif
  8960. }
  8961. // Perform the InvMixColumns transformation on a and store the result in dst.
  8962. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesimc_si128
  8963. FORCE_INLINE __m128i _mm_aesimc_si128(__m128i a)
  8964. {
  8965. #if defined(__aarch64__)
  8966. static const uint8_t ror32by8[] = {
  8967. 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
  8968. 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
  8969. };
  8970. uint8x16_t v = vreinterpretq_u8_m128i(a);
  8971. uint8x16_t w;
  8972. // multiplying 'v' by 4 in GF(2^8)
  8973. w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
  8974. w = (w << 1) ^ (uint8x16_t) (((int8x16_t) w >> 7) & 0x1b);
  8975. v ^= w;
  8976. v ^= (uint8x16_t) vrev32q_u16((uint16x8_t) w);
  8977. // multiplying 'v' by 2 in GF(2^8)
  8978. w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
  8979. w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
  8980. w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
  8981. return vreinterpretq_m128i_u8(w);
  8982. #else /* ARMv7-A NEON implementation */
  8983. uint8_t i, e, f, g, h, v[4][4];
  8984. vst1q_u8((uint8_t *) v, vreinterpretq_u8_m128i(a));
  8985. for (i = 0; i < 4; ++i) {
  8986. e = v[i][0];
  8987. f = v[i][1];
  8988. g = v[i][2];
  8989. h = v[i][3];
  8990. v[i][0] = SSE2NEON_MULTIPLY(e, 0x0e) ^ SSE2NEON_MULTIPLY(f, 0x0b) ^
  8991. SSE2NEON_MULTIPLY(g, 0x0d) ^ SSE2NEON_MULTIPLY(h, 0x09);
  8992. v[i][1] = SSE2NEON_MULTIPLY(e, 0x09) ^ SSE2NEON_MULTIPLY(f, 0x0e) ^
  8993. SSE2NEON_MULTIPLY(g, 0x0b) ^ SSE2NEON_MULTIPLY(h, 0x0d);
  8994. v[i][2] = SSE2NEON_MULTIPLY(e, 0x0d) ^ SSE2NEON_MULTIPLY(f, 0x09) ^
  8995. SSE2NEON_MULTIPLY(g, 0x0e) ^ SSE2NEON_MULTIPLY(h, 0x0b);
  8996. v[i][3] = SSE2NEON_MULTIPLY(e, 0x0b) ^ SSE2NEON_MULTIPLY(f, 0x0d) ^
  8997. SSE2NEON_MULTIPLY(g, 0x09) ^ SSE2NEON_MULTIPLY(h, 0x0e);
  8998. }
  8999. return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v));
  9000. #endif
  9001. }

// Emits the Advanced Encryption Standard (AES) instruction aeskeygenassist.
// This instruction generates a round key for AES encryption. See
// https://kazakov.life/2017/11/01/cryptocurrency-mining-on-ios-devices/
// for details.
//
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aeskeygenassist_si128
FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
{
#if defined(__aarch64__)
    uint8x16_t _a = vreinterpretq_u8_m128i(a);
    uint8x16_t v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), _a);
    v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), _a - 0x40);
    v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), _a - 0x80);
    v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), _a - 0xc0);

    uint32x4_t select_mask = {0xffffffff, 0x0, 0xffffffff, 0x0};
    uint64x2_t v_mask = vshrq_n_u64(vreinterpretq_u64_u8(v), 32);
    uint32x4_t x = vbslq_u32(select_mask, vreinterpretq_u32_u64(v_mask),
                             vreinterpretq_u32_u8(v));
    uint32x4_t ror_x = vorrq_u32(vshrq_n_u32(x, 8), vshlq_n_u32(x, 24));
    uint32x4_t ror_xor_x = veorq_u32(ror_x, vdupq_n_u32(rcon));

    return vreinterpretq_m128i_u32(vbslq_u32(select_mask, x, ror_xor_x));
#else /* ARMv7-A NEON implementation */
    uint32_t X1 = _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0x55));
    uint32_t X3 = _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xFF));
    for (int i = 0; i < 4; ++i) {
        ((uint8_t *) &X1)[i] = _sse2neon_sbox[((uint8_t *) &X1)[i]];
        ((uint8_t *) &X3)[i] = _sse2neon_sbox[((uint8_t *) &X3)[i]];
    }
    return _mm_set_epi32(((X3 >> 8) | (X3 << 24)) ^ rcon, X3,
                         ((X1 >> 8) | (X1 << 24)) ^ rcon, X1);
#endif
}
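
// Illustrative usage (not part of this header's API): one step of AES-128 key
// expansion built on _mm_aeskeygenassist_si128. The helper name and the
// surrounding key schedule are hypothetical; only intrinsics provided by this
// header are used.
//
//   static inline __m128i aes128_key_expand_step(__m128i key, __m128i gen)
//   {
//       gen = _mm_shuffle_epi32(gen, 0xff);  // broadcast ROT(SubWord(W3)) ^ rcon
//       key = _mm_xor_si128(key, _mm_slli_si128(key, 4));
//       key = _mm_xor_si128(key, _mm_slli_si128(key, 4));
//       key = _mm_xor_si128(key, _mm_slli_si128(key, 4));
//       return _mm_xor_si128(key, gen);
//   }
//
//   // e.g. the first round key derived from the cipher key k0:
//   //   __m128i k1 =
//   //       aes128_key_expand_step(k0, _mm_aeskeygenassist_si128(k0, 0x01));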

#undef SSE2NEON_AES_SBOX
#undef SSE2NEON_AES_RSBOX

#if defined(__aarch64__)
#undef SSE2NEON_XT
#undef SSE2NEON_MULTIPLY
#endif

#else /* __ARM_FEATURE_CRYPTO */
// Implements equivalent of 'aesenc' by combining AESE (with an empty key) and
// AESMC and then manually applying the real key as an xor operation. This
// unfortunately means an additional xor op; however, the compiler should be
// able to optimize it away for repeated calls. See
// https://blog.michaelbrase.com/2018/05/08/emulating-x86-aes-intrinsics-on-armv8-a
// for more details.
FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i b)
{
    return vreinterpretq_m128i_u8(
        vaesmcq_u8(vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))) ^
        vreinterpretq_u8_m128i(b));
}

// Perform one round of an AES decryption flow on data (state) in a using the
// round key in RoundKey, and store the result in dst.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdec_si128
FORCE_INLINE __m128i _mm_aesdec_si128(__m128i a, __m128i RoundKey)
{
    return vreinterpretq_m128i_u8(veorq_u8(
        vaesimcq_u8(vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
        vreinterpretq_u8_m128i(RoundKey)));
}

// Perform the last round of an AES encryption flow on data (state) in a using
// the round key in RoundKey, and store the result in dst.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenclast_si128
FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
{
    return _mm_xor_si128(vreinterpretq_m128i_u8(vaeseq_u8(
                             vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
                         RoundKey);
}
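
// Illustrative usage (not part of this header's API): a full AES-128 block
// encryption expressed with the intrinsics above. 'round_key' is a
// hypothetical 11-entry key schedule (see _mm_aeskeygenassist_si128).
//
//   static inline __m128i aes128_encrypt_block(__m128i block,
//                                              const __m128i round_key[11])
//   {
//       __m128i state = _mm_xor_si128(block, round_key[0]);  // initial AddRoundKey
//       for (int i = 1; i < 10; ++i)
//           state = _mm_aesenc_si128(state, round_key[i]);   // rounds 1..9
//       return _mm_aesenclast_si128(state, round_key[10]);   // final round, no MixColumns
//   }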

// Perform the last round of an AES decryption flow on data (state) in a using
// the round key in RoundKey, and store the result in dst.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdeclast_si128
FORCE_INLINE __m128i _mm_aesdeclast_si128(__m128i a, __m128i RoundKey)
{
    return vreinterpretq_m128i_u8(
        veorq_u8(vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0)),
                 vreinterpretq_u8_m128i(RoundKey)));
}

// Perform the InvMixColumns transformation on a and store the result in dst.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesimc_si128
FORCE_INLINE __m128i _mm_aesimc_si128(__m128i a)
{
    return vreinterpretq_m128i_u8(vaesimcq_u8(vreinterpretq_u8_m128i(a)));
}
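
// Illustrative usage (not part of this header's API): the matching AES-128
// block decryption. _mm_aesdec_si128 expects round keys transformed by
// _mm_aesimc_si128 (the "equivalent inverse cipher" form), except for the
// first and last AddRoundKey. 'round_key' is the same hypothetical encryption
// schedule as in the sketch above.
//
//   static inline __m128i aes128_decrypt_block(__m128i block,
//                                              const __m128i round_key[11])
//   {
//       __m128i state = _mm_xor_si128(block, round_key[10]);
//       for (int i = 9; i > 0; --i)
//           state = _mm_aesdec_si128(state, _mm_aesimc_si128(round_key[i]));
//       return _mm_aesdeclast_si128(state, round_key[0]);
//   }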

// Assist in expanding the AES cipher key by computing steps towards generating
// a round key for encryption cipher using data from a and an 8-bit round
// constant specified in imm8, and store the result in dst.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aeskeygenassist_si128
FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
{
    // AESE does ShiftRows and SubBytes on A
    uint8x16_t u8 = vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0));

    uint8x16_t dest = {
        // Undo ShiftRows step from AESE and extract X1 and X3
        u8[0x4], u8[0x1], u8[0xE], u8[0xB],  // SubBytes(X1)
        u8[0x1], u8[0xE], u8[0xB], u8[0x4],  // ROT(SubBytes(X1))
        u8[0xC], u8[0x9], u8[0x6], u8[0x3],  // SubBytes(X3)
        u8[0x9], u8[0x6], u8[0x3], u8[0xC],  // ROT(SubBytes(X3))
    };
    uint32x4_t r = {0, (unsigned) rcon, 0, (unsigned) rcon};
    return vreinterpretq_m128i_u8(dest) ^ vreinterpretq_m128i_u32(r);
}
#endif

/* Others */

// Perform a carry-less multiplication of two 64-bit integers, selected from a
// and b according to imm8, and store the results in dst.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clmulepi64_si128
FORCE_INLINE __m128i _mm_clmulepi64_si128(__m128i _a, __m128i _b, const int imm)
{
    uint64x2_t a = vreinterpretq_u64_m128i(_a);
    uint64x2_t b = vreinterpretq_u64_m128i(_b);
    switch (imm & 0x11) {
    case 0x00:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_low_u64(a), vget_low_u64(b)));
    case 0x01:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_high_u64(a), vget_low_u64(b)));
    case 0x10:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_low_u64(a), vget_high_u64(b)));
    case 0x11:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_high_u64(a), vget_high_u64(b)));
    default:
        abort();
    }
}
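
// Illustrative usage (not part of this header's API): bit 0 of imm8 picks the
// half of a (0 = low, 1 = high) and bit 4 picks the half of b, so the four
// partial products below combine into a full 128x128 -> 256-bit carry-less
// (schoolbook) multiply, as used in GHASH- or CRC-style code. Variable names
// are hypothetical.
//
//   __m128i lo  = _mm_clmulepi64_si128(a, b, 0x00);  // a[63:0]   * b[63:0]
//   __m128i hi  = _mm_clmulepi64_si128(a, b, 0x11);  // a[127:64] * b[127:64]
//   __m128i mid = _mm_xor_si128(_mm_clmulepi64_si128(a, b, 0x01),
//                               _mm_clmulepi64_si128(a, b, 0x10));  // cross terms
//   lo = _mm_xor_si128(lo, _mm_slli_si128(mid, 8));  // fold low half of mid into lo
//   hi = _mm_xor_si128(hi, _mm_srli_si128(mid, 8));  // fold high half of mid into hi
//   // (hi:lo) now holds the 256-bit carry-less product.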

// Return whether denormals-are-zero mode is currently enabled (bit 24 of
// FPCR on AArch64, FPSCR on AArch32).
FORCE_INLINE unsigned int _sse2neon_mm_get_denormals_zero_mode()
{
    union {
        fpcr_bitfield field;
#if defined(__aarch64__)
        uint64_t value;
#else
        uint32_t value;
#endif
    } r;

#if defined(__aarch64__)
    __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */
#else
    __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
#endif

    return r.field.bit24 ? _MM_DENORMALS_ZERO_ON : _MM_DENORMALS_ZERO_OFF;
}

// Count the number of bits set to 1 in unsigned 32-bit integer a, and
// return that count in dst.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u32
FORCE_INLINE int _mm_popcnt_u32(unsigned int a)
{
#if defined(__aarch64__)
#if __has_builtin(__builtin_popcount)
    return __builtin_popcount(a);
#else
    return (int) vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t) a)));
#endif
#else
    uint8x8_t input_val, count8x8_val;
    uint16x4_t count16x4_val;
    uint32x2_t count32x2_val;

    // Zero-extend 'a' into a 64-bit lane so only its four bytes contribute
    // set bits, then reduce the per-byte counts pairwise.
    input_val = vcreate_u8((uint64_t) a);
    count8x8_val = vcnt_u8(input_val);
    count16x4_val = vpaddl_u8(count8x8_val);
    count32x2_val = vpaddl_u16(count16x4_val);

    return (int) vget_lane_u32(count32x2_val, 0);
#endif
}

// Count the number of bits set to 1 in unsigned 64-bit integer a, and
// return that count in dst.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u64
FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a)
{
#if defined(__aarch64__)
#if __has_builtin(__builtin_popcountll)
    return __builtin_popcountll(a);
#else
    return (int64_t) vaddlv_u8(vcnt_u8(vcreate_u8(a)));
#endif
#else
    uint64_t count = 0;
    uint8x8_t input_val, count8x8_val;
    uint16x4_t count16x4_val;
    uint32x2_t count32x2_val;
    uint64x1_t count64x1_val;

    input_val = vld1_u8((uint8_t *) &a);
    count8x8_val = vcnt_u8(input_val);
    count16x4_val = vpaddl_u8(count8x8_val);
    count32x2_val = vpaddl_u16(count16x4_val);
    count64x1_val = vpaddl_u32(count32x2_val);
    vst1_u64(&count, count64x1_val);

    return count;
#endif
}

// Enable or disable denormals-are-zero mode by writing bit 24 of FPCR
// (AArch64) or FPSCR (AArch32) according to flag.
FORCE_INLINE void _sse2neon_mm_set_denormals_zero_mode(unsigned int flag)
{
    // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
    // regardless of the value of the FZ bit.
    union {
        fpcr_bitfield field;
#if defined(__aarch64__)
        uint64_t value;
#else
        uint32_t value;
#endif
    } r;

#if defined(__aarch64__)
    __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */
#else
    __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
#endif

    r.field.bit24 = (flag & _MM_DENORMALS_ZERO_MASK) == _MM_DENORMALS_ZERO_ON;

#if defined(__aarch64__)
    __asm__ __volatile__("msr FPCR, %0" ::"r"(r.value)); /* write */
#else
    __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r.value)); /* write */
#endif
}
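
// Illustrative usage (not part of this header's API): flush denormals to zero
// around a latency-sensitive floating-point loop, then restore the previous
// mode. Only names defined in this header are used.
//
//   unsigned int prev = _sse2neon_mm_get_denormals_zero_mode();
//   _sse2neon_mm_set_denormals_zero_mode(_MM_DENORMALS_ZERO_ON);
//   /* ... hot floating-point kernel ... */
//   _sse2neon_mm_set_denormals_zero_mode(prev);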

// Return the current 64-bit value of the processor's time-stamp counter.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=rdtsc
FORCE_INLINE uint64_t _rdtsc(void)
{
#if defined(__aarch64__)
    uint64_t val;

    /* According to ARM DDI 0487F.c, from Armv8.0 to Armv8.5 inclusive, the
     * system counter is at least 56 bits wide; from Armv8.6, the counter
     * must be 64 bits wide. So the system counter could be narrower than
     * 64 bits, in which case it is reported with the 'cap_user_time_short'
     * flag set.
     */
    __asm__ __volatile__("mrs %0, cntvct_el0" : "=r"(val));

    return val;
#else
    uint32_t pmccntr, pmuseren, pmcntenset;

    // Read the user mode Performance Monitoring Unit (PMU)
    // User Enable Register (PMUSERENR) access permissions.
    __asm__ __volatile__("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
    if (pmuseren & 1) {  // Allows reading PMUSERENR for user mode code.
        __asm__ __volatile__("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
        if (pmcntenset & 0x80000000UL) {  // Is it counting?
            __asm__ __volatile__("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
            // The counter is set up to count every 64th cycle
            return (uint64_t) (pmccntr) << 6;
        }
    }

    // Fall back to a system call, as we cannot enable PMUSERENR from user
    // mode.
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (uint64_t) (tv.tv_sec) * 1000000 + tv.tv_usec;
#endif
}
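
// Illustrative usage (not part of this header's API): measuring an interval.
// The value is in ticks of whichever counter the path above reads (the
// AArch64 generic timer, a /64-scaled cycle count, or microseconds from
// gettimeofday), so deltas are only comparable within one platform.
//
//   uint64_t start = _rdtsc();
//   /* ... code under measurement ... */
//   uint64_t elapsed_ticks = _rdtsc() - start;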

#if defined(__GNUC__) || defined(__clang__)
#pragma pop_macro("ALIGN_STRUCT")
#pragma pop_macro("FORCE_INLINE")
#endif

#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC pop_options
#endif

#endif