| 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165
116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205
420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229
432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763277327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532353335343535353635373538353935403541354235433544354535463547354835493550355135523553355435553556355735583559356035613562356335643565356635673568356935703571357235733574357535763577357835793580358135823583358435853586358735883589359035913592359335943595359635973598359936003601360236033604360536063607360836093610361136123613361436153616361736183619362036213622362336243625362636273628362936303631363236333634363536363637363836393640364136423643364436453646364736483649365036513652365336543655365636573658365936603661366236633664366536663667366836693670367136723673367436753676367736783679368036813682368336843685368636873688368936903691369236933694369536963697369836993700370137023703370437053706370737083709371037113712371337143715371637173718371937203721372237233724372537263727372837293730373137323733373437353736373737383739374037413742374337443745374637473748374937503751375237533754375537563757375837593760376137623763376437653766376737683769377037713772377337743775377637773778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313
832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720
472147224723472447254726472747284729473047314732473347344735473647374738473947404741474247434744474547464747474847494750475147524753475447554756475747584759476047614762476347644765476647674768476947704771477247734774477547764777477847794780478147824783478447854786478747884789479047914792479347944795479647974798479948004801480248034804480548064807480848094810481148124813481448154816481748184819482048214822482348244825482648274828482948304831483248334834483548364837483848394840484148424843484448454846484748484849485048514852485348544855485648574858485948604861486248634864486548664867486848694870487148724873487448754876487748784879488048814882488348844885488648874888488948904891489248934894489548964897489848994900490149024903490449054906490749084909491049114912491349144915491649174918491949204921492249234924492549264927492849294930493149324933493449354936493749384939494049414942494349444945494649474948494949504951495249534954495549564957495849594960496149624963496449654966496749684969497049714972497349744975497649774978497949804981498249834984498549864987498849894990499149924993499449954996499749984999500050015002500350045005500650075008500950105011501250135014501550165017501850195020502150225023502450255026502750285029503050315032503350345035503650375038503950405041504250435044504550465047504850495050505150525053505450555056505750585059506050615062506350645065506650675068506950705071507250735074507550765077507850795080508150825083508450855086508750885089509050915092509350945095509650975098509951005101510251035104510551065107510851095110511151125113511451155116511751185119512051215122512351245125512651275128512951305131513251335134513551365137513851395140514151425143514451455146514751485149515051515152515351545155515651575158515951605161516251635164516551665167516851695170517151725173517451755176517751785179518051815182518351845185518651875188518951905191519251935194519551965197519851995200520152025203520452055206520752085209521052115212521352145215521652175218521952205221522252235224522552265227522852295230523152325233523452355236523752385239524052415242524352445245524652475248524952505251525252535254525552565257525852595260526152625263526452655266526752685269527052715272527352745275527652775278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560
956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577757785779578057815782578357845785578657875788578957905791579257935794579557965797579857995800580158025803580458055806580758085809581058115812581358145815581658175818581958205821582258235824582558265827582858295830583158325833583458355836583758385839584058415842584358445845584658475848584958505851585258535854585558565857585858595860586158625863586458655866586758685869587058715872587358745875587658775878587958805881588258835884588558865887588858895890589158925893589458955896589758985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920592159225923592459255926592759285929593059315932593359345935593659375938593959405941594259435944594559465947594859495950595159525953595459555956595759585959596059615962596359645965596659675968596959705971597259735974597559765977597859795980598159825983598459855986598759885989599059915992599359945995599659975998599960006001600260036004600560066007600860096010601160126013601460156016601760186019602060216022602360246025602660276028602960306031603260336034603560366037603860396040604160426043604460456046604760486049605060516052605360546055605660576058605960606061606260636064606560666067606860696070607160726073607460756076607760786079608060816082608360846085608660876088608960906091609260936094609560966097609860996100610161026103610461056106610761086109611061116112611361146115611661176118611961206121612261236124612561266127612861296130613161326133613461356136613761386139614061416142614361446145614661476148614961506151615261536154615561566157615861596160616161626163616461656166616761686169617061716172617361746175617661776178617961806181618261836184618561866187618861896190619161926193619461956196619761986199620062016202620362046205620662076208620962106211621262136214621562166217621862196220622162226223622462256226622762286229623062316232623362346235623662376238623962406241624262436244624562466247624862496250625162526253625462556256625762586259626062616262626362646265626662676268626962706271627262736274627562766277627862796280628162826283628462856286628762886289629062916292629362946295629662976298629963006301630263036304630563066307630863096310631163126313631463156316631763186319632063216322632363246325632663276328632963306331633263336334633563366337633863396340634163426343634463456346634763486349635063516352635363546355635663576358635963606361636263636364636563666367636863696370637163726373637463756376637763786379638063816382638363846385638663876388638963906391639263936394639563966397639863996400640164026403640464056406640764086409641064116412641364146415641664176418641964206421642264236424642564266427642864296430643164326433643464356436643764386439644064416442644364446445644664476448644964506451645264536454645564566457645864596460646164626463646464656466646764686469647064716472647364746475647664776478647964806481648264836484648564866487648864896490649164926493649464956496649764
986499650065016502650365046505650665076508650965106511651265136514651565166517651865196520652165226523652465256526652765286529653065316532653365346535653665376538653965406541654265436544654565466547654865496550655165526553655465556556655765586559656065616562656365646565656665676568656965706571657265736574657565766577657865796580658165826583658465856586658765886589659065916592659365946595659665976598659966006601660266036604660566066607660866096610661166126613661466156616661766186619662066216622662366246625662666276628662966306631663266336634663566366637663866396640664166426643664466456646664766486649665066516652665366546655665666576658665966606661666266636664666566666667666866696670667166726673667466756676667766786679668066816682668366846685668666876688668966906691669266936694669566966697669866996700670167026703670467056706670767086709671067116712671367146715671667176718671967206721672267236724672567266727672867296730673167326733673467356736673767386739674067416742674367446745674667476748674967506751675267536754675567566757675867596760676167626763676467656766676767686769677067716772677367746775677667776778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727772787279728072817282728372847285728672877288728972907291729272937294729572967297729872997300730173027303730473057306730773087309731073117312731373147315731673177318731973207321732273237324732573267327732873297330733173327333733473357336733773387339734073417342734373447345734673477348734973507351735273537354735573567357735873597360736173627363736473657366736773687369737073717372737373747375737673777378737973807381738273837384738573867
387738873897390739173927393739473957396739773987399740074017402740374047405740674077408740974107411741274137414741574167417741874197420742174227423742474257426742774287429743074317432743374347435743674377438743974407441744274437444744574467447744874497450745174527453745474557456745774587459746074617462746374647465746674677468746974707471747274737474747574767477747874797480748174827483748474857486748774887489749074917492749374947495749674977498749975007501750275037504750575067507750875097510751175127513751475157516751775187519752075217522752375247525752675277528752975307531753275337534753575367537753875397540754175427543754475457546754775487549755075517552755375547555755675577558755975607561756275637564756575667567756875697570757175727573757475757576757775787579758075817582758375847585758675877588758975907591759275937594759575967597759875997600760176027603760476057606760776087609761076117612761376147615761676177618761976207621762276237624762576267627762876297630763176327633763476357636763776387639764076417642764376447645764676477648764976507651765276537654765576567657765876597660766176627663766476657666766776687669767076717672767376747675767676777678767976807681768276837684768576867687768876897690769176927693769476957696769776987699770077017702770377047705770677077708770977107711771277137714771577167717771877197720772177227723772477257726772777287729773077317732773377347735773677377738773977407741774277437744774577467747774877497750775177527753775477557756775777587759776077617762776377647765776677677768776977707771777277737774777577767777777877797780778177827783778477857786778777887789779077917792779377947795779677977798779978007801780278037804780578067807780878097810781178127813781478157816781778187819782078217822782378247825782678277828782978307831783278337834783578367837783878397840784178427843784478457846784778487849785078517852785378547855785678577858785978607861786278637864786578667867786878697870787178727873787478757876787778787879788078817882788378847885788678877888788978907891789278937894789578967897789878997900790179027903790479057906790779087909791079117912791379147915791679177918791979207921792279237924792579267927792879297930793179327933793479357936793779387939794079417942794379447945794679477948794979507951795279537954795579567957795879597960796179627963796479657966796779687969797079717972797379747975797679777978797979807981798279837984798579867987798879897990799179927993799479957996799779987999800080018002800380048005800680078008800980108011801280138014801580168017801880198020802180228023802480258026802780288029803080318032803380348035803680378038803980408041804280438044804580468047804880498050805180528053805480558056805780588059806080618062806380648065806680678068806980708071807280738074807580768077807880798080808180828083808480858086808780888089809080918092809380948095809680978098809981008101810281038104810581068107810881098110811181128113811481158116811781188119812081218122812381248125812681278128812981308131813281338134813581368137813881398140814181428143814481458146814781488149815081518152815381548155815681578158815981608161816281638164816581668167816881698170817181728173817481758176817781788179818081818182818381848185818681878188818981908191819281938194819581968197819881998200820182028203820482058206820782088209821082118212821382148215821682178218821982208221822282238224822582268227822882298230823182328233823482358236823782388239824082418242824382448245824682478248824982508251825282538254825582568257825882598260826182628263826482658266826782688269827082718272827382748275
827682778278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877787788779878087818782878387848785878687878788878987908791879287938794879587968797879887998800880188028803880488058806880788088809881088118812881388148815881688178818881988208821882288238824882588268827882888298830883188328833883488358836883788388839884088418842884388448845884688478848884988508851885288538854885588568857885888598860886188628863886488658866886788688869887088718872887388748875887688778878887988808881888288838884888588868887888888898890889188928893889488958896889788988899890089018902890389048905890689078908890989108911891289138914891589168917891889198920892189228923892489258926892789288929893089318932893389348935893689378938893989408941894289438944894589468947894889498950895189528953895489558956895789588959896089618962896389648965896689678968896989708971897289738974897589768977897889798980898189828983898489858986898789888989899089918992899389948995899689978998899990009001900290039004900590069007900890099010901190129013901490159016901790189019902090219022902390249025902690279028902990309031903290339034903590369037903890399040904190429043904490459046904790489049905090519052905390549055905690579058905990609061906290639064906590669067906890699070907190729073907490759076907790789079908090819082908390849085908690879088908990909091909290939094909590969097909890999100910191029103910491059106910791089109911091119112911391149115911691179118911991209121912291239124912591269127912891299130913191329133913491359136913791389139914091419142914391449145914691479148914991509151915291539154915591569157915891599160916191629163916
491659166916791689169917091719172917391749175917691779178917991809181918291839184918591869187918891899190919191929193919491959196919791989199920092019202920392049205920692079208920992109211921292139214921592169217921892199220922192229223922492259226922792289229923092319232923392349235923692379238923992409241924292439244924592469247924892499250925192529253925492559256925792589259926092619262926392649265926692679268926992709271927292739274927592769277927892799280928192829283928492859286928792889289929092919292929392949295929692979298929993009301930293039304930593069307930893099310931193129313931493159316931793189319932093219322932393249325932693279328932993309331933293339334933593369337933893399340934193429343934493459346934793489349935093519352935393549355935693579358935993609361936293639364936593669367936893699370937193729373937493759376937793789379938093819382938393849385938693879388938993909391939293939394939593969397939893999400940194029403940494059406940794089409941094119412941394149415941694179418941994209421942294239424942594269427942894299430943194329433943494359436943794389439944094419442944394449445944694479448944994509451945294539454945594569457945894599460946194629463946494659466946794689469947094719472947394749475947694779478947994809481948294839484948594869487948894899490949194929493949494959496949794989499950095019502950395049505950695079508950995109511951295139514951595169517951895199520952195229523952495259526952795289529953095319532953395349535953695379538953995409541954295439544954595469547954895499550955195529553955495559556955795589559956095619562956395649565956695679568956995709571957295739574957595769577957895799580958195829583958495859586958795889589959095919592959395949595959695979598959996009601960296039604960596069607960896099610961196129613961496159616961796189619962096219622962396249625962696279628962996309631963296339634963596369637963896399640964196429643964496459646964796489649965096519652965396549655965696579658965996609661966296639664966596669667966896699670967196729673967496759676967796789679968096819682968396849685968696879688968996909691969296939694969596969697969896999700970197029703970497059706970797089709971097119712971397149715971697179718971997209721972297239724972597269727972897299730973197329733973497359736973797389739974097419742974397449745974697479748974997509751975297539754975597569757975897599760976197629763976497659766976797689769977097719772977397749775977697779778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004
210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211022210223102241022510226102271022810229102301023110232102331023410235102361023710238102391024010241102421024310244102451024610247102481024910250102511025210253102541025510256102571025810259102601026110262102631026410265102661026710268102691027010271102721027310274102751027610277102781027910280102811028210283102841028510286102871028810289102901029110292102931029410295102961029710298102991030010301103021030310304103051030610307103081030910310103111031210313103141031510316103171031810319103201032110322103231032410325103261032710328103291033010331103321033310334103351033610337103381033910340103411034210343103441034510346103471034810349103501035110352103531035410355103561035710358103591036010361103621036310364103651036610367103681036910370103711037210373103741037510376103771037810379103801038110382103831038410385103861038710388103891039010391103921039310394103951039610397103981039910400104011040210403104041040510406104071040810409104101041110412104131041410415104161041710418104191042010421104221042310424104251042610427104281042910430104311043210433104341043510436104371043810439104401044110442104431044410445104461044710448104491045010451104521045310454104551045610457104581045910460104611046210463104641046510466104671046810469104701047110472104731047410475104761047710478104791048010481104821048310484104851048610487104881048910490104911049210493104941049510496104971049810499105001050110502105031050410505105061050710508105091051010511105121051310514105151051610517105181051910520105211052210523105241052510526105271052810529105301053110532105331053410535105361053710538105391054010541105421054310544105451054610547105481054910550105511055210553105541055510556105571055810559105601056110562105631056410565105661056710568105691057010571105721057310574105751057610577105781057910580105811058210583105841058510586105871058810589105901059110592105931059410595105961059710598105991060010601106021060310604106051060610607106081060910610106111061210613106141061510616106171061810619106201062110622106231062410625106261062710628106291063010631106321063310634106351063610637106381063910640106411064210643106441064510646106471064810649106501065110652106531065410655106561065710658106591066010661106621066310664106651066610667106681066910670106711067210673106741067510676106771067810679106801068110682106831068410685106861068710688106891069010691106921069310694106951069610697106981069910700107011070210703107041070510706107071070810709107101071110712107131071410715107161071710718107191072010721107221072310724107251072610727107281072910730107311073210733107341073510736107371073810739107401074110742107431074410745107461074710748107491075010751107521075
310754107551075610757107581075910760107611076210763107641076510766107671076810769107701077110772107731077410775107761077710778107791078010781107821078310784107851078610787107881078910790107911079210793107941079510796107971079810799108001080110802108031080410805108061080710808108091081010811108121081310814108151081610817108181081910820108211082210823108241082510826108271082810829108301083110832108331083410835108361083710838108391084010841108421084310844108451084610847108481084910850108511085210853108541085510856108571085810859108601086110862108631086410865108661086710868108691087010871108721087310874108751087610877108781087910880108811088210883108841088510886108871088810889108901089110892108931089410895108961089710898108991090010901109021090310904109051090610907109081090910910109111091210913109141091510916109171091810919109201092110922109231092410925109261092710928109291093010931109321093310934109351093610937109381093910940109411094210943109441094510946109471094810949109501095110952109531095410955109561095710958109591096010961109621096310964109651096610967109681096910970109711097210973109741097510976109771097810979109801098110982109831098410985109861098710988109891099010991109921099310994109951099610997109981099911000110011100211003110041100511006110071100811009110101101111012110131101411015110161101711018110191102011021110221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211142211423114241142511426114271142811429114301143111432114331143411435114361143711438114391144011441114421144311444114451144611447114481144911450114511145211453114541145511456114571145811459114601146111462114631146
411465114661146711468114691147011471114721147311474114751147611477114781147911480114811148211483114841148511486114871148811489114901149111492114931149411495114961149711498114991150011501115021150311504115051150611507115081150911510115111151211513115141151511516115171151811519115201152111522115231152411525115261152711528115291153011531115321153311534115351153611537115381153911540115411154211543115441154511546115471154811549115501155111552115531155411555115561155711558115591156011561115621156311564115651156611567115681156911570115711157211573115741157511576115771157811579115801158111582115831158411585115861158711588115891159011591115921159311594115951159611597115981159911600116011160211603116041160511606116071160811609116101161111612116131161411615116161161711618116191162011621116221162311624116251162611627116281162911630116311163211633116341163511636116371163811639116401164111642116431164411645116461164711648116491165011651116521165311654116551165611657116581165911660116611166211663116641166511666116671166811669116701167111672116731167411675116761167711678116791168011681116821168311684116851168611687116881168911690116911169211693116941169511696116971169811699117001170111702117031170411705117061170711708117091171011711117121171311714117151171611717117181171911720117211172211723117241172511726117271172811729117301173111732117331173411735117361173711738117391174011741117421174311744117451174611747117481174911750117511175211753117541175511756117571175811759117601176111762117631176411765117661176711768117691177011771117721177311774117751177611777117781177911780117811178211783117841178511786117871178811789117901179111792117931179411795117961179711798117991180011801118021180311804118051180611807118081180911810118111181211813118141181511816118171181811819118201182111822118231182411825118261182711828118291183011831118321183311834118351183611837118381183911840118411184211843118441184511846118471184811849118501185111852118531185411855118561185711858118591186011861118621186311864118651186611867118681186911870118711187211873118741187511876118771187811879118801188111882118831188411885118861188711888118891189011891118921189311894118951189611897118981189911900119011190211903119041190511906119071190811909119101191111912119131191411915119161191711918119191192011921119221192311924119251192611927119281192911930119311193211933119341193511936119371193811939119401194111942119431194411945119461194711948119491195011951119521195311954119551195611957119581195911960119611196211963119641196511966119671196811969119701197111972119731197411975119761197711978119791198011981119821198311984119851198611987119881198911990119911199211993119941199511996119971199811999120001200112002120031200412005120061200712008120091201012011120121201312014120151201612017120181201912020120211202212023120241202512026120271202812029120301203112032120331203412035120361203712038120391204012041120421204312044120451204612047120481204912050120511205212053120541205512056120571205812059120601206112062120631206412065120661206712068120691207012071120721207312074120751207612077120781207912080120811208212083120841208512086120871208812089120901209112092120931209412095120961209712098120991210012101121021210312104121051210612107121081210912110121111211212113121141211512116121171211812119121201212112122121231212412125121261212712128121291213012131121321213312134121351213612137121381213912140121411214212143121441214512146121471214812149121501215112152121531215412155121561215712158121591216012161121621216312164121651216612167121681216912170121711217212173121741217
512176121771217812179121801218112182121831218412185121861218712188121891219012191121921219312194121951219612197121981219912200122011220212203122041220512206122071220812209122101221112212122131221412215122161221712218122191222012221122221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211262212623126241262512626126271262812629126301263112632126331263412635126361263712638126391264012641126421264312644126451264612647126481264912650126511265212653126541265512656126571265812659126601266112662126631266412665126661266712668126691267012671126721267312674126751267612677126781267912680126811268212683126841268512686126871268812689126901269112692126931269412695126961269712698126991270012701127021270312704127051270612707127081270912710127111271212713127141271512716127171271812719127201272112722127231272412725127261272712728127291273012731127321273312734127351273612737127381273912740127411274212743127441274512746127471274812749127501275112752127531275412755127561275712758127591276012761127621276312764127651276612767127681276912770127711277212773127741277512776127771277812779127801278112782127831278412785127861278712788127891279012791127921279312794127951279612797127981279912800128011280212803128041280512806128071280812809128101281112812128131281412815128161281712818128191282012821128221282312824128251282612827128281282912830128311283212833128341283512836128371283812839128401284112842128431284412845128461284712848128491285012851128521285312854128551285612857128581285912860128611286212863128641286512866128671286812869128701287112872128731287412875128761287712878128791288012881128821288312884128851288
612887128881288912890128911289212893128941289512896128971289812899129001290112902129031290412905129061290712908129091291012911129121291312914129151291612917129181291912920129211292212923129241292512926129271292812929129301293112932129331293412935129361293712938129391294012941129421294312944129451294612947129481294912950129511295212953129541295512956129571295812959129601296112962129631296412965129661296712968129691297012971129721297312974129751297612977129781297912980129811298212983129841298512986129871298812989129901299112992129931299412995129961299712998129991300013001130021300313004130051300613007130081300913010130111301213013130141301513016130171301813019130201302113022130231302413025130261302713028130291303013031130321303313034130351303613037130381303913040130411304213043130441304513046130471304813049130501305113052130531305413055130561305713058130591306013061130621306313064130651306613067130681306913070130711307213073130741307513076130771307813079130801308113082130831308413085130861308713088130891309013091130921309313094130951309613097130981309913100131011310213103131041310513106131071310813109131101311113112131131311413115131161311713118131191312013121131221312313124131251312613127131281312913130131311313213133131341313513136131371313813139131401314113142131431314413145131461314713148131491315013151131521315313154131551315613157131581315913160131611316213163131641316513166131671316813169131701317113172131731317413175131761317713178131791318013181131821318313184131851318613187131881318913190131911319213193131941319513196131971319813199132001320113202132031320413205132061320713208132091321013211132121321313214132151321613217132181321913220132211322213223132241322513226132271322813229132301323113232132331323413235132361323713238132391324013241132421324313244132451324613247132481324913250132511325213253132541325513256132571325813259132601326113262132631326413265132661326713268132691327013271132721327313274132751327613277132781327913280132811328213283132841328513286132871328813289132901329113292132931329413295132961329713298132991330013301133021330313304133051330613307133081330913310133111331213313133141331513316133171331813319133201332113322133231332413325133261332713328133291333013331133321333313334133351333613337133381333913340133411334213343133441334513346133471334813349133501335113352133531335413355133561335713358133591336013361133621336313364133651336613367133681336913370133711337213373133741337513376133771337813379133801338113382133831338413385133861338713388133891339013391133921339313394133951339613397133981339913400134011340213403134041340513406134071340813409134101341113412134131341413415134161341713418134191342013421134221342313424134251342613427134281342913430134311343213433134341343513436134371343813439134401344113442134431344413445134461344713448134491345013451134521345313454134551345613457134581345913460134611346213463134641346513466134671346813469134701347113472134731347413475134761347713478134791348013481134821348313484134851348613487134881348913490134911349213493134941349513496134971349813499135001350113502135031350413505135061350713508135091351013511135121351313514135151351613517135181351913520135211352213523135241352513526135271352813529135301353113532135331353413535135361353713538135391354013541135421354313544135451354613547135481354913550135511355213553135541355513556135571355813559135601356113562135631356413565135661356713568135691357013571135721357313574135751357613577135781357913580135811358213583135841358513586135871358813589135901359113592135931359413595135961359
713598135991360013601136021360313604136051360613607136081360913610136111361213613136141361513616136171361813619136201362113622136231362413625136261362713628136291363013631136321363313634136351363613637136381363913640136411364213643136441364513646136471364813649136501365113652136531365413655136561365713658136591366013661136621366313664136651366613667136681366913670136711367213673136741367513676136771367813679136801368113682136831368413685136861368713688136891369013691136921369313694136951369613697136981369913700137011370213703137041370513706137071370813709137101371113712137131371413715137161371713718137191372013721137221372313724137251372613727137281372913730137311373213733137341373513736137371373813739137401374113742137431374413745137461374713748137491375013751137521375313754137551375613757137581375913760137611376213763137641376513766137671376813769137701377113772137731377413775137761377713778137791378013781137821378313784137851378613787137881378913790137911379213793137941379513796137971379813799138001380113802138031380413805138061380713808138091381013811138121381313814138151381613817138181381913820138211382213823138241382513826138271382813829138301383113832138331383413835138361383713838138391384013841138421384313844138451384613847138481384913850138511385213853138541385513856138571385813859138601386113862138631386413865138661386713868138691387013871138721387313874138751387613877138781387913880138811388213883138841388513886138871388813889138901389113892138931389413895138961389713898138991390013901139021390313904139051390613907139081390913910139111391213913139141391513916139171391813919139201392113922139231392413925139261392713928139291393013931139321393313934139351393613937139381393913940139411394213943139441394513946139471394813949139501395113952139531395413955139561395713958139591396013961139621396313964139651396613967139681396913970139711397213973139741397513976139771397813979139801398113982139831398413985139861398713988139891399013991139921399313994139951399613997139981399914000140011400214003140041400514006140071400814009140101401114012140131401414015140161401714018140191402014021140221402314024140251402614027140281402914030140311403214033140341403514036140371403814039140401404114042140431404414045140461404714048140491405014051140521405314054140551405614057140581405914060140611406214063140641406514066140671406814069140701407114072140731407414075140761407714078140791408014081140821408314084140851408614087140881408914090140911409214093140941409514096140971409814099141001410114102141031410414105141061410714108141091411014111141121411314114141151411614117141181411914120141211412214123141241412514126141271412814129141301413114132141331413414135141361413714138141391414014141141421414314144141451414614147141481414914150141511415214153141541415514156141571415814159141601416114162141631416414165141661416714168141691417014171141721417314174141751417614177141781417914180141811418214183141841418514186141871418814189141901419114192141931419414195141961419714198141991420014201142021420314204142051420614207142081420914210142111421214213142141421514216142171421814219142201422114222142231422414225142261422714228142291423014231142321423314234142351423614237142381423914240142411424214243142441424514246142471424814249142501425114252142531425414255142561425714258142591426014261142621426314264142651426614267142681426914270142711427214273142741427514276142771427814279142801428114282142831428414285142861428714288142891429014291142921429314294142951429614297142981429914300143011430214303143041430514306143071430
814309143101431114312143131431414315143161431714318143191432014321143221432314324143251432614327143281432914330143311433214333143341433514336143371433814339143401434114342143431434414345143461434714348143491435014351143521435314354143551435614357143581435914360143611436214363143641436514366143671436814369143701437114372143731437414375143761437714378143791438014381143821438314384143851438614387143881438914390143911439214393143941439514396143971439814399144001440114402144031440414405144061440714408144091441014411144121441314414144151441614417144181441914420144211442214423144241442514426144271442814429144301443114432144331443414435144361443714438144391444014441144421444314444144451444614447144481444914450144511445214453144541445514456144571445814459144601446114462144631446414465144661446714468144691447014471144721447314474144751447614477144781447914480144811448214483144841448514486144871448814489144901449114492144931449414495144961449714498144991450014501145021450314504145051450614507145081450914510145111451214513145141451514516145171451814519145201452114522145231452414525145261452714528145291453014531145321453314534145351453614537145381453914540145411454214543145441454514546145471454814549145501455114552145531455414555145561455714558145591456014561145621456314564145651456614567145681456914570145711457214573145741457514576145771457814579145801458114582145831458414585145861458714588145891459014591145921459314594 |
- //-------------------------------------------------------------------------------------
- // DirectXMathVector.inl -- SIMD C++ Math library
- //
- // THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
- // ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
- // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
- // PARTICULAR PURPOSE.
- //
- // Copyright (c) Microsoft Corporation. All rights reserved.
- //
- // http://go.microsoft.com/fwlink/?LinkID=615560
- //-------------------------------------------------------------------------------------
- #pragma once
- #if defined(_XM_NO_INTRINSICS_)
- #define XMISNAN(x) ((*(const uint32_t*)&(x) & 0x7F800000) == 0x7F800000 && (*(const uint32_t*)&(x) & 0x7FFFFF) != 0)
- #define XMISINF(x) ((*(const uint32_t*)&(x) & 0x7FFFFFFF) == 0x7F800000)
- #endif
- #if defined(_XM_SSE_INTRINSICS_)
- #define XM3UNPACK3INTO4(l1,l2,l3) \
- XMVECTOR V3 = _mm_shuffle_ps(l2,l3,_MM_SHUFFLE(0,0,3,2));\
- XMVECTOR V2 = _mm_shuffle_ps(l2,l1,_MM_SHUFFLE(3,3,1,0));\
- V2 = XM_PERMUTE_PS(V2,_MM_SHUFFLE(1,1,0,2));\
- XMVECTOR V4 = _mm_castsi128_ps( _mm_srli_si128(_mm_castps_si128(l3),32/8) );
- #define XM3PACK4INTO3(v2x) \
- v2x = _mm_shuffle_ps(V2,V3,_MM_SHUFFLE(1,0,2,1));\
- V2 = _mm_shuffle_ps(V2,V1,_MM_SHUFFLE(2,2,0,0));\
- V1 = _mm_shuffle_ps(V1,V2,_MM_SHUFFLE(0,2,1,0));\
- V3 = _mm_shuffle_ps(V3,V4,_MM_SHUFFLE(0,0,2,2));\
- V3 = _mm_shuffle_ps(V3,V4,_MM_SHUFFLE(2,1,2,0));
- #endif
- /****************************************************************************
- *
- * General Vector
- *
- ****************************************************************************/
- //------------------------------------------------------------------------------
- // Assignment operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- // Return a vector with all elements equaling zero
- inline XMVECTOR XM_CALLCONV XMVectorZero()
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult = { { { 0.0f, 0.0f, 0.0f, 0.0f } } };
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_f32(0);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_setzero_ps();
- #endif
- }
- //------------------------------------------------------------------------------
- // Initialize a vector with four floating point values
- inline XMVECTOR XM_CALLCONV XMVectorSet
- (
- float x,
- float y,
- float z,
- float w
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult = { { { x, y, z, w } } };
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t V0 = vcreate_f32(((uint64_t)*(const uint32_t *)&x) | ((uint64_t)(*(const uint32_t *)&y) << 32));
- float32x2_t V1 = vcreate_f32(((uint64_t)*(const uint32_t *)&z) | ((uint64_t)(*(const uint32_t *)&w) << 32));
- return vcombine_f32(V0, V1);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_set_ps( w, z, y, x );
- #endif
- }
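- // Editor's usage sketch, not part of the original header: shows the component
- // order XMVectorSet expects; the helper name below is hypothetical.
- inline XMVECTOR XM_CALLCONV XMExampleMakePoint(float px, float py, float pz)
- {
-     // w = 1.0f is the usual convention for a position (point) vector
-     return XMVectorSet(px, py, pz, 1.0f);
- }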
- //------------------------------------------------------------------------------
- // Initialize a vector with four integer values
- inline XMVECTOR XM_CALLCONV XMVectorSetInt
- (
- uint32_t x,
- uint32_t y,
- uint32_t z,
- uint32_t w
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 vResult = { { { x, y, z, w } } };
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t V0 = vcreate_u32(((uint64_t)x) | ((uint64_t)y << 32));
- uint32x2_t V1 = vcreate_u32(((uint64_t)z) | ((uint64_t)w << 32));
- return vcombine_u32(V0, V1);
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i V = _mm_set_epi32( w, z, y, x );
- return _mm_castsi128_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- // Initialize a vector with a replicated floating point value
- inline XMVECTOR XM_CALLCONV XMVectorReplicate
- (
- float Value
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult;
- vResult.f[0] =
- vResult.f[1] =
- vResult.f[2] =
- vResult.f[3] = Value;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_f32( Value );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_set_ps1( Value );
- #endif
- }
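- // Editor's usage sketch, not part of the original header: replicate a scalar so it
- // can act as a full SIMD operand. Assumes XMVectorMultiplyAdd from DirectXMath.h;
- // the helper name is hypothetical.
- inline XMVECTOR XM_CALLCONV XMExampleScaleBias(FXMVECTOR V, float Scale, float Bias)
- {
-     XMVECTOR vScale = XMVectorReplicate(Scale);    // (Scale, Scale, Scale, Scale)
-     XMVECTOR vBias = XMVectorReplicate(Bias);      // (Bias, Bias, Bias, Bias)
-     return XMVectorMultiplyAdd(V, vScale, vBias);  // V * Scale + Bias, per component
- }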
- //------------------------------------------------------------------------------
- // Initialize a vector with a replicated floating point value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorReplicatePtr
- (
- const float *pValue
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float Value = pValue[0];
- XMVECTORF32 vResult;
- vResult.f[0] =
- vResult.f[1] =
- vResult.f[2] =
- vResult.f[3] = Value;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_dup_f32( pValue );
- #elif defined(_XM_AVX_INTRINSICS_)
- return _mm_broadcast_ss( pValue );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_load_ps1( pValue );
- #endif
- }
- //------------------------------------------------------------------------------
- // Initialize a vector with a replicated integer value
- inline XMVECTOR XM_CALLCONV XMVectorReplicateInt
- (
- uint32_t Value
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 vResult;
- vResult.u[0] =
- vResult.u[1] =
- vResult.u[2] =
- vResult.u[3] = Value;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_u32( Value );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_set1_epi32( Value );
- return _mm_castsi128_ps(vTemp);
- #endif
- }
- //------------------------------------------------------------------------------
- // Initialize a vector with a replicated integer value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorReplicateIntPtr
- (
- const uint32_t *pValue
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t Value = pValue[0];
- XMVECTORU32 vResult;
- vResult.u[0] =
- vResult.u[1] =
- vResult.u[2] =
- vResult.u[3] = Value;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_dup_u32(pValue);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_load_ps1(reinterpret_cast<const float *>(pValue));
- #endif
- }
- //------------------------------------------------------------------------------
- // Initialize a vector with all bits set (true mask)
- inline XMVECTOR XM_CALLCONV XMVectorTrueInt()
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 vResult = { { { 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU } } };
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_s32(-1);
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i V = _mm_set1_epi32(-1);
- return _mm_castsi128_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- // Initialize a vector with all bits clear (false mask)
- inline XMVECTOR XM_CALLCONV XMVectorFalseInt()
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult = { { { 0.0f, 0.0f, 0.0f, 0.0f } } };
- return vResult;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_u32(0);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_setzero_ps();
- #endif
- }
- //------------------------------------------------------------------------------
- // Replicate the x component of the vector
- inline XMVECTOR XM_CALLCONV XMVectorSplatX
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult;
- vResult.f[0] =
- vResult.f[1] =
- vResult.f[2] =
- vResult.f[3] = V.vector4_f32[0];
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_lane_f32( vget_low_f32( V ), 0 );
- #elif defined(_XM_AVX2_INTRINSICS_)
- return _mm_broadcastss_ps( V );
- #elif defined(_XM_SSE_INTRINSICS_)
- return XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- #endif
- }
- //------------------------------------------------------------------------------
- // Replicate the y component of the vector
- inline XMVECTOR XM_CALLCONV XMVectorSplatY
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult;
- vResult.f[0] =
- vResult.f[1] =
- vResult.f[2] =
- vResult.f[3] = V.vector4_f32[1];
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_lane_f32( vget_low_f32( V ), 1 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- #endif
- }
- //------------------------------------------------------------------------------
- // Replicate the z component of the vector
- inline XMVECTOR XM_CALLCONV XMVectorSplatZ
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult;
- vResult.f[0] =
- vResult.f[1] =
- vResult.f[2] =
- vResult.f[3] = V.vector4_f32[2];
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_lane_f32( vget_high_f32( V ), 0 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- #endif
- }
- //------------------------------------------------------------------------------
- // Replicate the w component of the vector
- inline XMVECTOR XM_CALLCONV XMVectorSplatW
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult;
- vResult.f[0] =
- vResult.f[1] =
- vResult.f[2] =
- vResult.f[3] = V.vector4_f32[3];
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_lane_f32( vget_high_f32( V ), 1 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return XM_PERMUTE_PS( V, _MM_SHUFFLE(3, 3, 3, 3) );
- #endif
- }
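- // Editor's usage sketch, not part of the original header: splats turn one component
- // into a full operand, e.g. dividing a homogeneous point by its own w. Assumes
- // XMVectorDivide from DirectXMath.h; the helper name is hypothetical.
- inline XMVECTOR XM_CALLCONV XMExampleHomogenize(FXMVECTOR V)
- {
-     XMVECTOR vW = XMVectorSplatW(V);   // (w, w, w, w)
-     return XMVectorDivide(V, vW);      // (x/w, y/w, z/w, 1)
- }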
- //------------------------------------------------------------------------------
- // Return a vector of 1.0f,1.0f,1.0f,1.0f
- inline XMVECTOR XM_CALLCONV XMVectorSplatOne()
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult;
- vResult.f[0] =
- vResult.f[1] =
- vResult.f[2] =
- vResult.f[3] = 1.0f;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_f32(1.0f);
- #elif defined(_XM_SSE_INTRINSICS_)
- return g_XMOne;
- #endif
- }
- //------------------------------------------------------------------------------
- // Return a vector of INF,INF,INF,INF
- inline XMVECTOR XM_CALLCONV XMVectorSplatInfinity()
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 vResult;
- vResult.u[0] =
- vResult.u[1] =
- vResult.u[2] =
- vResult.u[3] = 0x7F800000;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_u32(0x7F800000);
- #elif defined(_XM_SSE_INTRINSICS_)
- return g_XMInfinity;
- #endif
- }
- //------------------------------------------------------------------------------
- // Return a vector of Q_NAN,Q_NAN,Q_NAN,Q_NAN
- inline XMVECTOR XM_CALLCONV XMVectorSplatQNaN()
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 vResult;
- vResult.u[0] =
- vResult.u[1] =
- vResult.u[2] =
- vResult.u[3] = 0x7FC00000;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_u32(0x7FC00000);
- #elif defined(_XM_SSE_INTRINSICS_)
- return g_XMQNaN;
- #endif
- }
- //------------------------------------------------------------------------------
- // Return a vector of 1.192092896e-7f,1.192092896e-7f,1.192092896e-7f,1.192092896e-7f
- inline XMVECTOR XM_CALLCONV XMVectorSplatEpsilon()
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 vResult;
- vResult.u[0] =
- vResult.u[1] =
- vResult.u[2] =
- vResult.u[3] = 0x34000000;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_u32(0x34000000);
- #elif defined(_XM_SSE_INTRINSICS_)
- return g_XMEpsilon;
- #endif
- }
- //------------------------------------------------------------------------------
- // Return a vector of -0.0f (0x80000000),-0.0f,-0.0f,-0.0f
- inline XMVECTOR XM_CALLCONV XMVectorSplatSignMask()
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 vResult;
- vResult.u[0] =
- vResult.u[1] =
- vResult.u[2] =
- vResult.u[3] = 0x80000000U;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vdupq_n_u32(0x80000000U);
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i V = _mm_set1_epi32( 0x80000000 );
- return _mm_castsi128_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- // Return a floating point value via an index. Using this function is not
- // recommended due to the performance cost of indexed access.
- inline float XM_CALLCONV XMVectorGetByIndex(FXMVECTOR V, size_t i)
- {
- assert( i < 4 );
- _Analysis_assume_( i < 4 );
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_f32[i];
- #else
- XMVECTORF32 U;
- U.v = V;
- return U.f[i];
- #endif
- }
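- // Editor's note, not part of the original header: as the comment above warns, indexed
- // access forces a round trip through memory. When the component is known at compile
- // time, prefer the fixed accessors below. A sketch with a hypothetical helper name:
- inline float XM_CALLCONV XMExampleFirstComponent(FXMVECTOR V)
- {
-     return XMVectorGetX(V);             // stays in a register on the SSE/NEON paths
-     // return XMVectorGetByIndex(V, 0); // same result, but spills to memory
- }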
- //------------------------------------------------------------------------------
- // Return the X component in an FPU register.
- inline float XM_CALLCONV XMVectorGetX(FXMVECTOR V)
- {
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_f32[0];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vgetq_lane_f32(V, 0);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_cvtss_f32(V);
- #endif
- }
- // Return the Y component in an FPU register.
- inline float XM_CALLCONV XMVectorGetY(FXMVECTOR V)
- {
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_f32[1];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vgetq_lane_f32(V, 1);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- return _mm_cvtss_f32(vTemp);
- #endif
- }
- // Return the Z component in an FPU register.
- inline float XM_CALLCONV XMVectorGetZ(FXMVECTOR V)
- {
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_f32[2];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vgetq_lane_f32(V, 2);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- return _mm_cvtss_f32(vTemp);
- #endif
- }
- // Return the W component in an FPU register.
- inline float XM_CALLCONV XMVectorGetW(FXMVECTOR V)
- {
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_f32[3];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vgetq_lane_f32(V, 3);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- return _mm_cvtss_f32(vTemp);
- #endif
- }
- //------------------------------------------------------------------------------
- // Store a component indexed by i into a 32 bit float location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetByIndexPtr(float *f, FXMVECTOR V, size_t i)
- {
- assert( f != nullptr );
- assert( i < 4 );
- _Analysis_assume_( i < 4 );
- #if defined(_XM_NO_INTRINSICS_)
- *f = V.vector4_f32[i];
- #else
- XMVECTORF32 U;
- U.v = V;
- *f = U.f[i];
- #endif
- }
- //------------------------------------------------------------------------------
- // Store the X component into a 32 bit float location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetXPtr(float *x, FXMVECTOR V)
- {
- assert( x != nullptr);
- #if defined(_XM_NO_INTRINSICS_)
- *x = V.vector4_f32[0];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- vst1q_lane_f32(x,V,0);
- #elif defined(_XM_SSE_INTRINSICS_)
- _mm_store_ss(x,V);
- #endif
- }
- // Store the Y component into a 32 bit float location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetYPtr(float *y, FXMVECTOR V)
- {
- assert( y != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- *y = V.vector4_f32[1];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- vst1q_lane_f32(y,V,1);
- #elif defined(_XM_SSE4_INTRINSICS_)
- *((int*)y) = _mm_extract_ps( V, 1 );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- _mm_store_ss(y,vResult);
- #endif
- }
- // Store the Z component into a 32 bit float location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetZPtr(float *z, FXMVECTOR V)
- {
- assert( z != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- *z = V.vector4_f32[2];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- vst1q_lane_f32(z,V,2);
- #elif defined(_XM_SSE4_INTRINSICS_)
- *((int*)z) = _mm_extract_ps( V, 2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- _mm_store_ss(z,vResult);
- #endif
- }
- // Store the W component into a 32 bit float location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetWPtr(float *w, FXMVECTOR V)
- {
- assert( w != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- *w = V.vector4_f32[3];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- vst1q_lane_f32(w,V,3);
- #elif defined(_XM_SSE4_INTRINSICS_)
- *((int*)w) = _mm_extract_ps( V, 3 );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- _mm_store_ss(w,vResult);
- #endif
- }
- //------------------------------------------------------------------------------
- // Return an integer value via an index. Using this function is not
- // recommended due to the performance cost of indexed access.
- inline uint32_t XM_CALLCONV XMVectorGetIntByIndex(FXMVECTOR V, size_t i)
- {
- assert( i < 4 );
- _Analysis_assume_( i < 4 );
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_u32[i];
- #else
- XMVECTORU32 U;
- U.v = V;
- return U.u[i];
- #endif
- }
- //------------------------------------------------------------------------------
- // Return the X component in an integer register.
- inline uint32_t XM_CALLCONV XMVectorGetIntX(FXMVECTOR V)
- {
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_u32[0];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vgetq_lane_u32(V, 0);
- #elif defined(_XM_SSE_INTRINSICS_)
- return static_cast<uint32_t>(_mm_cvtsi128_si32(_mm_castps_si128(V)));
- #endif
- }
- // Return the Y component in an integer register.
- inline uint32_t XM_CALLCONV XMVectorGetIntY(FXMVECTOR V)
- {
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_u32[1];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vgetq_lane_u32(V, 1);
- #elif defined(_XM_SSE4_INTRINSICS_)
- __m128i V1 = _mm_castps_si128( V );
- return static_cast<uint32_t>( _mm_extract_epi32( V1, 1 ) );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vResulti = _mm_shuffle_epi32(_mm_castps_si128(V),_MM_SHUFFLE(1,1,1,1));
- return static_cast<uint32_t>(_mm_cvtsi128_si32(vResulti));
- #endif
- }
- // Return the Z component in an integer register.
- inline uint32_t XM_CALLCONV XMVectorGetIntZ(FXMVECTOR V)
- {
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_u32[2];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vgetq_lane_u32(V, 2);
- #elif defined(_XM_SSE4_INTRINSICS_)
- __m128i V1 = _mm_castps_si128( V );
- return static_cast<uint32_t>( _mm_extract_epi32( V1, 2 ) );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vResulti = _mm_shuffle_epi32(_mm_castps_si128(V),_MM_SHUFFLE(2,2,2,2));
- return static_cast<uint32_t>(_mm_cvtsi128_si32(vResulti));
- #endif
- }
- // Return the W component in an integer register.
- inline uint32_t XM_CALLCONV XMVectorGetIntW(FXMVECTOR V)
- {
- #if defined(_XM_NO_INTRINSICS_)
- return V.vector4_u32[3];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vgetq_lane_u32(V, 3);
- #elif defined(_XM_SSE4_INTRINSICS_)
- __m128i V1 = _mm_castps_si128( V );
- return static_cast<uint32_t>( _mm_extract_epi32( V1, 3 ) );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vResulti = _mm_shuffle_epi32(_mm_castps_si128(V),_MM_SHUFFLE(3,3,3,3));
- return static_cast<uint32_t>(_mm_cvtsi128_si32(vResulti));
- #endif
- }
- //------------------------------------------------------------------------------
- // Store a component indexed by i into a 32 bit integer location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetIntByIndexPtr(uint32_t *x, FXMVECTOR V, size_t i)
- {
- assert( x != nullptr );
- assert( i < 4 );
- _Analysis_assume_( i < 4 );
- #if defined(_XM_NO_INTRINSICS_)
- *x = V.vector4_u32[i];
- #else
- XMVECTORU32 U;
- U.v = V;
- *x = U.u[i];
- #endif
- }
- //------------------------------------------------------------------------------
- // Store the X component into a 32 bit integer location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetIntXPtr(uint32_t *x, FXMVECTOR V)
- {
- assert( x != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- *x = V.vector4_u32[0];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- vst1q_lane_u32(x,*reinterpret_cast<const uint32x4_t*>(&V),0);
- #elif defined(_XM_SSE_INTRINSICS_)
- _mm_store_ss(reinterpret_cast<float *>(x),V);
- #endif
- }
- // Store the Y component into a 32 bit integer location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetIntYPtr(uint32_t *y, FXMVECTOR V)
- {
- assert( y != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- *y = V.vector4_u32[1];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- vst1q_lane_u32(y,*reinterpret_cast<const uint32x4_t*>(&V),1);
- #elif defined(_XM_SSE4_INTRINSICS_)
- __m128i V1 = _mm_castps_si128( V );
- *y = static_cast<uint32_t>( _mm_extract_epi32( V1, 1 ) );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- _mm_store_ss(reinterpret_cast<float *>(y),vResult);
- #endif
- }
- // Store the Z component into a 32 bit integer location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetIntZPtr(uint32_t *z, FXMVECTOR V)
- {
- assert( z != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- *z = V.vector4_u32[2];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- vst1q_lane_u32(z,*reinterpret_cast<const uint32x4_t*>(&V),2);
- #elif defined(_XM_SSE4_INTRINSICS_)
- __m128i V1 = _mm_castps_si128( V );
- *z = static_cast<uint32_t>( _mm_extract_epi32( V1, 2 ) );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- _mm_store_ss(reinterpret_cast<float *>(z),vResult);
- #endif
- }
- // Store the W component into a 32 bit integer location in memory.
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorGetIntWPtr(uint32_t *w, FXMVECTOR V)
- {
- assert( w != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- *w = V.vector4_u32[3];
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- vst1q_lane_u32(w,*reinterpret_cast<const uint32x4_t*>(&V),3);
- #elif defined(_XM_SSE4_INTRINSICS_)
- __m128i V1 = _mm_castps_si128( V );
- *w = static_cast<uint32_t>( _mm_extract_epi32( V1, 3 ) );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- _mm_store_ss(reinterpret_cast<float *>(w),vResult);
- #endif
- }
- //------------------------------------------------------------------------------
- // Set a single indexed floating point component
- inline XMVECTOR XM_CALLCONV XMVectorSetByIndex(FXMVECTOR V, float f, size_t i)
- {
- assert( i < 4 );
- _Analysis_assume_( i < 4 );
- XMVECTORF32 U;
- U.v = V;
- U.f[i] = f;
- return U.v;
- }
- //------------------------------------------------------------------------------
- // Sets the X component of a vector to a passed floating point value
- inline XMVECTOR XM_CALLCONV XMVectorSetX(FXMVECTOR V, float x)
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 U = { { {
- x,
- V.vector4_f32[1],
- V.vector4_f32[2],
- V.vector4_f32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vsetq_lane_f32(x,V,0);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = _mm_set_ss(x);
- vResult = _mm_move_ss(V,vResult);
- return vResult;
- #endif
- }
- // Sets the Y component of a vector to a passed floating point value
- inline XMVECTOR XM_CALLCONV XMVectorSetY(FXMVECTOR V, float y)
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 U = { { {
- V.vector4_f32[0],
- y,
- V.vector4_f32[2],
- V.vector4_f32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vsetq_lane_f32(y,V,1);
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vResult = _mm_set_ss(y);
- vResult = _mm_insert_ps( V, vResult, 0x10 );
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap y and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,2,0,1));
- // Convert input to vector
- XMVECTOR vTemp = _mm_set_ss(y);
- // Replace the x component
- vResult = _mm_move_ss(vResult,vTemp);
- // Swap y and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(3,2,0,1));
- return vResult;
- #endif
- }
- // Sets the Z component of a vector to a passed floating point value
- inline XMVECTOR XM_CALLCONV XMVectorSetZ(FXMVECTOR V, float z)
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 U = { { {
- V.vector4_f32[0],
- V.vector4_f32[1],
- z,
- V.vector4_f32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vsetq_lane_f32(z,V,2);
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vResult = _mm_set_ss(z);
- vResult = _mm_insert_ps( V, vResult, 0x20 );
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap z and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,0,1,2));
- // Convert input to vector
- XMVECTOR vTemp = _mm_set_ss(z);
- // Replace the x component
- vResult = _mm_move_ss(vResult,vTemp);
- // Swap z and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(3,0,1,2));
- return vResult;
- #endif
- }
- // Sets the W component of a vector to a passed floating point value
- inline XMVECTOR XM_CALLCONV XMVectorSetW(FXMVECTOR V, float w)
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 U = { { {
- V.vector4_f32[0],
- V.vector4_f32[1],
- V.vector4_f32[2],
- w
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vsetq_lane_f32(w,V,3);
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vResult = _mm_set_ss(w);
- vResult = _mm_insert_ps( V, vResult, 0x30 );
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap w and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,2,1,3));
- // Convert input to vector
- XMVECTOR vTemp = _mm_set_ss(w);
- // Replace the x component
- vResult = _mm_move_ss(vResult,vTemp);
- // Swap w and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(0,2,1,3));
- return vResult;
- #endif
- }
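- // Editor's usage sketch, not part of the original header: the per-component setters
- // return a new vector rather than mutating the input. Helper name is hypothetical.
- inline XMVECTOR XM_CALLCONV XMExampleWithUnitW(FXMVECTOR V)
- {
-     return XMVectorSetW(V, 1.0f);       // copy of V with w forced to 1.0f
- }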
- //------------------------------------------------------------------------------
- // Sets a component of a vector to a floating point value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetByIndexPtr(FXMVECTOR V, const float *f, size_t i)
- {
- assert( f != nullptr );
- assert( i < 4 );
- _Analysis_assume_( i < 4 );
- XMVECTORF32 U;
- U.v = V;
- U.f[i] = *f;
- return U.v;
- }
- //------------------------------------------------------------------------------
- // Sets the X component of a vector to a floating point value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetXPtr(FXMVECTOR V, const float *x)
- {
- assert( x != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 U = { { {
- *x,
- V.vector4_f32[1],
- V.vector4_f32[2],
- V.vector4_f32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_lane_f32(x,V,0);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = _mm_load_ss(x);
- vResult = _mm_move_ss(V,vResult);
- return vResult;
- #endif
- }
- // Sets the Y component of a vector to a floating point value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetYPtr(FXMVECTOR V, const float *y)
- {
- assert( y != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 U = { { {
- V.vector4_f32[0],
- *y,
- V.vector4_f32[2],
- V.vector4_f32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_lane_f32(y,V,1);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap y and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,2,0,1));
- // Convert input to vector
- XMVECTOR vTemp = _mm_load_ss(y);
- // Replace the x component
- vResult = _mm_move_ss(vResult,vTemp);
- // Swap y and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(3,2,0,1));
- return vResult;
- #endif
- }
- // Sets the Z component of a vector to a floating point value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetZPtr(FXMVECTOR V, const float *z)
- {
- assert( z != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 U = { { {
- V.vector4_f32[0],
- V.vector4_f32[1],
- *z,
- V.vector4_f32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_lane_f32(z,V,2);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap z and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,0,1,2));
- // Convert input to vector
- XMVECTOR vTemp = _mm_load_ss(z);
- // Replace the x component
- vResult = _mm_move_ss(vResult,vTemp);
- // Swap z and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(3,0,1,2));
- return vResult;
- #endif
- }
- // Sets the W component of a vector to a floating point value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetWPtr(FXMVECTOR V, const float *w)
- {
- assert( w != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 U = { { {
- V.vector4_f32[0],
- V.vector4_f32[1],
- V.vector4_f32[2],
- *w
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_lane_f32(w,V,3);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap w and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,2,1,3));
- // Convert input to vector
- XMVECTOR vTemp = _mm_load_ss(w);
- // Replace the x component
- vResult = _mm_move_ss(vResult,vTemp);
- // Swap w and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(0,2,1,3));
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- // Sets a component of a vector to an integer passed by value
- inline XMVECTOR XM_CALLCONV XMVectorSetIntByIndex(FXMVECTOR V, uint32_t x, size_t i)
- {
- assert( i < 4 );
- _Analysis_assume_( i < 4 );
- XMVECTORU32 tmp;
- tmp.v = V;
- tmp.u[i] = x;
- return tmp;
- }
- //------------------------------------------------------------------------------
- // Sets the X component of a vector to an integer passed by value
- inline XMVECTOR XM_CALLCONV XMVectorSetIntX(FXMVECTOR V, uint32_t x)
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 U = { { {
- x,
- V.vector4_u32[1],
- V.vector4_u32[2],
- V.vector4_u32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vsetq_lane_u32(x,V,0);
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cvtsi32_si128(x);
- XMVECTOR vResult = _mm_move_ss(V,_mm_castsi128_ps(vTemp));
- return vResult;
- #endif
- }
- // Sets the Y component of a vector to an integer passed by value
- inline XMVECTOR XM_CALLCONV XMVectorSetIntY(FXMVECTOR V, uint32_t y)
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 U = { { {
- V.vector4_u32[0],
- y,
- V.vector4_u32[2],
- V.vector4_u32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vsetq_lane_u32(y,V,1);
- #elif defined(_XM_SSE4_INTRINSICS_)
- __m128i vResult = _mm_castps_si128( V );
- vResult = _mm_insert_epi32( vResult, static_cast<int>(y), 1 );
- return _mm_castsi128_ps( vResult );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap y and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,2,0,1));
- // Convert input to vector
- __m128i vTemp = _mm_cvtsi32_si128(y);
- // Replace the x component
- vResult = _mm_move_ss(vResult,_mm_castsi128_ps(vTemp));
- // Swap y and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(3,2,0,1));
- return vResult;
- #endif
- }
- // Sets the Z component of a vector to an integer passed by value
- inline XMVECTOR XM_CALLCONV XMVectorSetIntZ(FXMVECTOR V, uint32_t z)
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 U = { { {
- V.vector4_u32[0],
- V.vector4_u32[1],
- z,
- V.vector4_u32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vsetq_lane_u32(z,V,2);
- #elif defined(_XM_SSE4_INTRINSICS_)
- __m128i vResult = _mm_castps_si128( V );
- vResult = _mm_insert_epi32( vResult, static_cast<int>(z), 2 );
- return _mm_castsi128_ps( vResult );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap z and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,0,1,2));
- // Convert input to vector
- __m128i vTemp = _mm_cvtsi32_si128(z);
- // Replace the x component
- vResult = _mm_move_ss(vResult,_mm_castsi128_ps(vTemp));
- // Swap z and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(3,0,1,2));
- return vResult;
- #endif
- }
- // Sets the W component of a vector to an integer passed by value
- inline XMVECTOR XM_CALLCONV XMVectorSetIntW(FXMVECTOR V, uint32_t w)
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 U = { { {
- V.vector4_u32[0],
- V.vector4_u32[1],
- V.vector4_u32[2],
- w
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vsetq_lane_u32(w,V,3);
- #elif defined(_XM_SSE4_INTRINSICS_)
- __m128i vResult = _mm_castps_si128( V );
- vResult = _mm_insert_epi32( vResult, static_cast<int>(w), 3 );
- return _mm_castsi128_ps( vResult );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap w and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,2,1,3));
- // Convert input to vector
- __m128i vTemp = _mm_cvtsi32_si128(w);
- // Replace the x component
- vResult = _mm_move_ss(vResult,_mm_castsi128_ps(vTemp));
- // Swap w and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(0,2,1,3));
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- // Sets a component of a vector to an integer value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetIntByIndexPtr(FXMVECTOR V, const uint32_t *x, size_t i)
- {
- assert( x != nullptr );
- assert( i < 4 );
- _Analysis_assume_( i < 4 );
- XMVECTORU32 tmp;
- tmp.v = V;
- tmp.u[i] = *x;
- return tmp;
- }
- //------------------------------------------------------------------------------
- // Sets the X component of a vector to an integer value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetIntXPtr(FXMVECTOR V, const uint32_t *x)
- {
- assert( x != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 U = { { {
- *x,
- V.vector4_u32[1],
- V.vector4_u32[2],
- V.vector4_u32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_lane_u32(x,*reinterpret_cast<const uint32x4_t *>(&V),0);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_load_ss(reinterpret_cast<const float *>(x));
- XMVECTOR vResult = _mm_move_ss(V,vTemp);
- return vResult;
- #endif
- }
- // Sets the Y component of a vector to an integer value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetIntYPtr(FXMVECTOR V, const uint32_t *y)
- {
- assert( y != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 U = { { {
- V.vector4_u32[0],
- *y,
- V.vector4_u32[2],
- V.vector4_u32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_lane_u32(y,*reinterpret_cast<const uint32x4_t *>(&V),1);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap y and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,2,0,1));
- // Convert input to vector
- XMVECTOR vTemp = _mm_load_ss(reinterpret_cast<const float *>(y));
- // Replace the x component
- vResult = _mm_move_ss(vResult,vTemp);
- // Swap y and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(3,2,0,1));
- return vResult;
- #endif
- }
- // Sets the Z component of a vector to an integer value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetIntZPtr(FXMVECTOR V, const uint32_t *z)
- {
- assert( z != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 U = { { {
- V.vector4_u32[0],
- V.vector4_u32[1],
- *z,
- V.vector4_u32[3]
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_lane_u32(z,*reinterpret_cast<const uint32x4_t *>(&V),2);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap z and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,0,1,2));
- // Convert input to vector
- XMVECTOR vTemp = _mm_load_ss(reinterpret_cast<const float *>(z));
- // Replace the x component
- vResult = _mm_move_ss(vResult,vTemp);
- // Swap z and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(3,0,1,2));
- return vResult;
- #endif
- }
- // Sets the W component of a vector to an integer value passed by pointer
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorSetIntWPtr(FXMVECTOR V, const uint32_t *w)
- {
- assert( w != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 U = { { {
- V.vector4_u32[0],
- V.vector4_u32[1],
- V.vector4_u32[2],
- *w
- } } };
- return U.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vld1q_lane_u32(w,*reinterpret_cast<const uint32x4_t *>(&V),3);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap w and x
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,2,1,3));
- // Convert input to vector
- XMVECTOR vTemp = _mm_load_ss(reinterpret_cast<const float *>(w));
- // Replace the x component
- vResult = _mm_move_ss(vResult,vTemp);
- // Swap w and x again
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(0,2,1,3));
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSwizzle
- (
- FXMVECTOR V,
- uint32_t E0,
- uint32_t E1,
- uint32_t E2,
- uint32_t E3
- )
- {
- assert( (E0 < 4) && (E1 < 4) && (E2 < 4) && (E3 < 4) );
- _Analysis_assume_( (E0 < 4) && (E1 < 4) && (E2 < 4) && (E3 < 4) );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- V.vector4_f32[E0],
- V.vector4_f32[E1],
- V.vector4_f32[E2],
- V.vector4_f32[E3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- static const uint32_t ControlElement[ 4 ] =
- {
- 0x03020100, // XM_SWIZZLE_X
- 0x07060504, // XM_SWIZZLE_Y
- 0x0B0A0908, // XM_SWIZZLE_Z
- 0x0F0E0D0C, // XM_SWIZZLE_W
- };
- int8x8x2_t tbl;
- tbl.val[0] = vget_low_f32(V);
- tbl.val[1] = vget_high_f32(V);
- uint32x2_t idx = vcreate_u32( ((uint64_t)ControlElement[E0]) | (((uint64_t)ControlElement[E1]) << 32) );
- const uint8x8_t rL = vtbl2_u8( tbl, idx );
- idx = vcreate_u32( ((uint64_t)ControlElement[E2]) | (((uint64_t)ControlElement[E3]) << 32) );
- const uint8x8_t rH = vtbl2_u8( tbl, idx );
- return vcombine_f32( rL, rH );
- #elif defined(_XM_AVX_INTRINSICS_)
- unsigned int elem[4] = { E0, E1, E2, E3 };
- __m128i vControl = _mm_loadu_si128( reinterpret_cast<const __m128i *>(&elem[0]) );
- return _mm_permutevar_ps( V, vControl );
- #else
- const uint32_t *aPtr = (const uint32_t* )(&V);
- XMVECTOR Result;
- uint32_t *pWork = (uint32_t*)(&Result);
- pWork[0] = aPtr[E0];
- pWork[1] = aPtr[E1];
- pWork[2] = aPtr[E2];
- pWork[3] = aPtr[E3];
- return Result;
- #endif
- }
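- // Editor's usage sketch, not part of the original header: reordering components with
- // run-time indices. Assumes the XM_SWIZZLE_X..XM_SWIZZLE_W constants (0..3) from
- // DirectXMath.h; the helper name is hypothetical.
- inline XMVECTOR XM_CALLCONV XMExampleReverse(FXMVECTOR V)
- {
-     // (x, y, z, w) -> (w, z, y, x)
-     return XMVectorSwizzle(V, XM_SWIZZLE_W, XM_SWIZZLE_Z, XM_SWIZZLE_Y, XM_SWIZZLE_X);
- }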
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorPermute
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- uint32_t PermuteX,
- uint32_t PermuteY,
- uint32_t PermuteZ,
- uint32_t PermuteW
- )
- {
- assert( PermuteX <= 7 && PermuteY <= 7 && PermuteZ <= 7 && PermuteW <= 7 );
- _Analysis_assume_( PermuteX <= 7 && PermuteY <= 7 && PermuteZ <= 7 && PermuteW <= 7 );
- #if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
- static const uint32_t ControlElement[ 8 ] =
- {
- 0x03020100, // XM_PERMUTE_0X
- 0x07060504, // XM_PERMUTE_0Y
- 0x0B0A0908, // XM_PERMUTE_0Z
- 0x0F0E0D0C, // XM_PERMUTE_0W
- 0x13121110, // XM_PERMUTE_1X
- 0x17161514, // XM_PERMUTE_1Y
- 0x1B1A1918, // XM_PERMUTE_1Z
- 0x1F1E1D1C, // XM_PERMUTE_1W
- };
- int8x8x4_t tbl;
- tbl.val[0] = vget_low_f32(V1);
- tbl.val[1] = vget_high_f32(V1);
- tbl.val[2] = vget_low_f32(V2);
- tbl.val[3] = vget_high_f32(V2);
- uint32x2_t idx = vcreate_u32( ((uint64_t)ControlElement[PermuteX]) | (((uint64_t)ControlElement[PermuteY]) << 32) );
- const uint8x8_t rL = vtbl4_u8( tbl, idx );
- idx = vcreate_u32( ((uint64_t)ControlElement[PermuteZ]) | (((uint64_t)ControlElement[PermuteW]) << 32) );
- const uint8x8_t rH = vtbl4_u8( tbl, idx );
- return vcombine_f32( rL, rH );
- #elif defined(_XM_AVX_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
- static const XMVECTORU32 three = { { { 3, 3, 3, 3 } } };
- __declspec(align(16)) unsigned int elem[4] = { PermuteX, PermuteY, PermuteZ, PermuteW };
- __m128i vControl = _mm_load_si128( reinterpret_cast<const __m128i *>(&elem[0]) );
-
- __m128i vSelect = _mm_cmpgt_epi32( vControl, three );
- vControl = _mm_castps_si128( _mm_and_ps( _mm_castsi128_ps( vControl ), three ) );
- __m128 shuffled1 = _mm_permutevar_ps( V1, vControl );
- __m128 shuffled2 = _mm_permutevar_ps( V2, vControl );
- __m128 masked1 = _mm_andnot_ps( _mm_castsi128_ps( vSelect ), shuffled1 );
- __m128 masked2 = _mm_and_ps( _mm_castsi128_ps( vSelect ), shuffled2 );
- return _mm_or_ps( masked1, masked2 );
- #else
-
- const uint32_t *aPtr[2];
- aPtr[0] = (const uint32_t* )(&V1);
- aPtr[1] = (const uint32_t* )(&V2);
- XMVECTOR Result;
- uint32_t *pWork = (uint32_t*)(&Result);
- const uint32_t i0 = PermuteX & 3;
- const uint32_t vi0 = PermuteX >> 2;
- pWork[0] = aPtr[vi0][i0];
- const uint32_t i1 = PermuteY & 3;
- const uint32_t vi1 = PermuteY >> 2;
- pWork[1] = aPtr[vi1][i1];
- const uint32_t i2 = PermuteZ & 3;
- const uint32_t vi2 = PermuteZ >> 2;
- pWork[2] = aPtr[vi2][i2];
- const uint32_t i3 = PermuteW & 3;
- const uint32_t vi3 = PermuteW >> 2;
- pWork[3] = aPtr[vi3][i3];
- return Result;
- #endif
- }
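- // Editor's usage sketch, not part of the original header: permute indices 0-3 pick from
- // the first vector and 4-7 from the second. Assumes the XM_PERMUTE_0X..XM_PERMUTE_1W
- // constants from DirectXMath.h; the helper name is hypothetical.
- inline XMVECTOR XM_CALLCONV XMExampleTakeXYFromV1ZWFromV2(FXMVECTOR V1, FXMVECTOR V2)
- {
-     // (V1.x, V1.y, V2.z, V2.w)
-     return XMVectorPermute(V1, V2, XM_PERMUTE_0X, XM_PERMUTE_0Y, XM_PERMUTE_1Z, XM_PERMUTE_1W);
- }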
- //------------------------------------------------------------------------------
- // Define a control vector to be used in XMVectorSelect
- // operations. The four integers specified in XMVectorSelectControl
- // serve as indices to select between components in two vectors.
- // The first index controls selection for the first component of
- // the vectors involved in a select operation, the second index
- // controls selection for the second component etc. A value of
- // zero for an index causes the corresponding component from the first
- // vector to be selected whereas a one causes the component from the
- // second vector to be selected instead.
- inline XMVECTOR XM_CALLCONV XMVectorSelectControl
- (
- uint32_t VectorIndex0,
- uint32_t VectorIndex1,
- uint32_t VectorIndex2,
- uint32_t VectorIndex3
- )
- {
- #if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
- // x=Index0,y=Index1,z=Index2,w=Index3
- __m128i vTemp = _mm_set_epi32(VectorIndex3,VectorIndex2,VectorIndex1,VectorIndex0);
- // Any non-zero entries become 0xFFFFFFFF else 0
- vTemp = _mm_cmpgt_epi32(vTemp,g_XMZero);
- return _mm_castsi128_ps(vTemp);
- #elif defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
- int32x2_t V0 = vcreate_s32(((uint64_t)VectorIndex0) | ((uint64_t)VectorIndex1 << 32));
- int32x2_t V1 = vcreate_s32(((uint64_t)VectorIndex2) | ((uint64_t)VectorIndex3 << 32));
- int32x4_t vTemp = vcombine_s32(V0, V1);
- // Any non-zero entries become 0xFFFFFFFF else 0
- return vcgtq_s32(vTemp,g_XMZero);
- #else
- XMVECTOR ControlVector;
- const uint32_t ControlElement[] =
- {
- XM_SELECT_0,
- XM_SELECT_1
- };
- assert(VectorIndex0 < 2);
- assert(VectorIndex1 < 2);
- assert(VectorIndex2 < 2);
- assert(VectorIndex3 < 2);
- _Analysis_assume_(VectorIndex0 < 2);
- _Analysis_assume_(VectorIndex1 < 2);
- _Analysis_assume_(VectorIndex2 < 2);
- _Analysis_assume_(VectorIndex3 < 2);
- ControlVector.vector4_u32[0] = ControlElement[VectorIndex0];
- ControlVector.vector4_u32[1] = ControlElement[VectorIndex1];
- ControlVector.vector4_u32[2] = ControlElement[VectorIndex2];
- ControlVector.vector4_u32[3] = ControlElement[VectorIndex3];
- return ControlVector;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSelect
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- FXMVECTOR Control
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Result = { { {
- (V1.vector4_u32[0] & ~Control.vector4_u32[0]) | (V2.vector4_u32[0] & Control.vector4_u32[0]),
- (V1.vector4_u32[1] & ~Control.vector4_u32[1]) | (V2.vector4_u32[1] & Control.vector4_u32[1]),
- (V1.vector4_u32[2] & ~Control.vector4_u32[2]) | (V2.vector4_u32[2] & Control.vector4_u32[2]),
- (V1.vector4_u32[3] & ~Control.vector4_u32[3]) | (V2.vector4_u32[3] & Control.vector4_u32[3]),
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vbslq_f32( Control, V2, V1 );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp1 = _mm_andnot_ps(Control,V1);
- XMVECTOR vTemp2 = _mm_and_ps(V2,Control);
- return _mm_or_ps(vTemp1,vTemp2);
- #endif
- }
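- // Editor's usage sketch, not part of the original header: build a control mask with
- // XMVectorSelectControl (0 keeps V1's component, 1 takes V2's) and blend with
- // XMVectorSelect. The helper name is hypothetical.
- inline XMVECTOR XM_CALLCONV XMExampleKeepXYTakeZW(FXMVECTOR V1, FXMVECTOR V2)
- {
-     XMVECTOR Control = XMVectorSelectControl(0, 0, 1, 1);
-     return XMVectorSelect(V1, V2, Control);  // (V1.x, V1.y, V2.z, V2.w)
- }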
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorMergeXY
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Result = { { {
- V1.vector4_u32[0],
- V2.vector4_u32[0],
- V1.vector4_u32[1],
- V2.vector4_u32[1],
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vzipq_f32( V1, V2 ).val[0];
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_unpacklo_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorMergeZW
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Result = { { {
- V1.vector4_u32[2],
- V2.vector4_u32[2],
- V1.vector4_u32[3],
- V2.vector4_u32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vzipq_f32( V1, V2 ).val[1];
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_unpackhi_ps( V1, V2 );
- #endif
- }
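- // Editor's usage sketch, not part of the original header: the merge functions interleave
- // two vectors, the usual building block of a 4x4 transpose. Helper name hypothetical.
- inline XMVECTOR XM_CALLCONV XMExampleInterleaveLow(FXMVECTOR V1, FXMVECTOR V2)
- {
-     return XMVectorMergeXY(V1, V2);    // (V1.x, V2.x, V1.y, V2.y)
- }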
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2, uint32_t Elements)
- {
- assert( Elements < 4 );
- _Analysis_assume_( Elements < 4 );
- return XMVectorPermute(V1, V2, Elements, ((Elements) + 1), ((Elements) + 2), ((Elements) + 3));
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorRotateLeft(FXMVECTOR V, uint32_t Elements)
- {
- assert( Elements < 4 );
- _Analysis_assume_( Elements < 4 );
- return XMVectorSwizzle( V, Elements & 3, (Elements + 1) & 3, (Elements + 2) & 3, (Elements + 3) & 3 );
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorRotateRight(FXMVECTOR V, uint32_t Elements)
- {
- assert( Elements < 4 );
- _Analysis_assume_( Elements < 4 );
- return XMVectorSwizzle( V, (4 - (Elements)) & 3, (5 - (Elements)) & 3, (6 - (Elements)) & 3, (7 - (Elements)) & 3 );
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorInsert(FXMVECTOR VD, FXMVECTOR VS, uint32_t VSLeftRotateElements,
- uint32_t Select0, uint32_t Select1, uint32_t Select2, uint32_t Select3)
- {
- XMVECTOR Control = XMVectorSelectControl(Select0&1, Select1&1, Select2&1, Select3&1);
- return XMVectorSelect( VD, XMVectorRotateLeft(VS, VSLeftRotateElements), Control );
- }
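- // Editor's usage sketch, not part of the original header: XMVectorInsert rotates the
- // source left, then selects the flagged components into the destination. The helper
- // name is hypothetical.
- inline XMVECTOR XM_CALLCONV XMExampleInsertXAsW(FXMVECTOR VD, FXMVECTOR VS)
- {
-     // Rotate VS left by one so VS.x lands in the w slot, then take only w from it:
-     // result = (VD.x, VD.y, VD.z, VS.x)
-     return XMVectorInsert(VD, VS, 1, 0, 0, 0, 1);
- }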
- //------------------------------------------------------------------------------
- // Comparison operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- (V1.vector4_f32[0] == V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[1] == V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[2] == V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[3] == V2.vector4_f32[3]) ? 0xFFFFFFFF : 0,
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vceqq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_cmpeq_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorEqualR
- (
- uint32_t* pCR,
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- assert( pCR != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t ux = (V1.vector4_f32[0] == V2.vector4_f32[0]) ? 0xFFFFFFFFU : 0;
- uint32_t uy = (V1.vector4_f32[1] == V2.vector4_f32[1]) ? 0xFFFFFFFFU : 0;
- uint32_t uz = (V1.vector4_f32[2] == V2.vector4_f32[2]) ? 0xFFFFFFFFU : 0;
- uint32_t uw = (V1.vector4_f32[3] == V2.vector4_f32[3]) ? 0xFFFFFFFFU : 0;
- uint32_t CR = 0;
- if (ux&uy&uz&uw)
- {
- // All elements are equal
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!(ux|uy|uz|uw))
- {
- // All elements are not equal
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- XMVECTORU32 Control = { { { ux, uy, uz, uw } } };
- return Control;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1);
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFU )
- {
- // All elements are equal
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- // All elements are not equal
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpeq_ps(V1,V2);
- uint32_t CR = 0;
- int iTest = _mm_movemask_ps(vTemp);
- if (iTest==0xf)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- // All elements are not equal
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- return vTemp;
- #endif
- }
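- // Editor's usage sketch, not part of the original header: the "R" variants also fill a
- // comparison record that the XMComparison* helpers in DirectXMath.h decode. The helper
- // name is hypothetical.
- inline bool XM_CALLCONV XMExampleAllEqual(FXMVECTOR V1, FXMVECTOR V2)
- {
-     uint32_t CR = 0;
-     XMVectorEqualR(&CR, V1, V2);
-     return XMComparisonAllTrue(CR);    // true only if all four components compared equal
- }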
- //------------------------------------------------------------------------------
- // Treat the components of the vectors as unsigned integers and
- // compare individual bits between the two. This is useful for
- // comparing control vectors and result vectors returned from
- // other comparison operations.
- inline XMVECTOR XM_CALLCONV XMVectorEqualInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- (V1.vector4_u32[0] == V2.vector4_u32[0]) ? 0xFFFFFFFF : 0,
- (V1.vector4_u32[1] == V2.vector4_u32[1]) ? 0xFFFFFFFF : 0,
- (V1.vector4_u32[2] == V2.vector4_u32[2]) ? 0xFFFFFFFF : 0,
- (V1.vector4_u32[3] == V2.vector4_u32[3]) ? 0xFFFFFFFF : 0,
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vceqq_u32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i V = _mm_cmpeq_epi32( _mm_castps_si128(V1),_mm_castps_si128(V2) );
- return _mm_castsi128_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorEqualIntR
- (
- uint32_t* pCR,
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- assert( pCR != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Control = XMVectorEqualInt(V1, V2);
- *pCR = 0;
- if (XMVector4EqualInt(Control, XMVectorTrueInt()))
- {
- // All elements are equal
- *pCR |= XM_CRMASK_CR6TRUE;
- }
- else if (XMVector4EqualInt(Control, XMVectorFalseInt()))
- {
- // All elements are not equal
- *pCR |= XM_CRMASK_CR6FALSE;
- }
- return Control;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_u32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1);
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFU )
- {
- // All elements are equal
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- // All elements are not equal
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i V = _mm_cmpeq_epi32( _mm_castps_si128(V1),_mm_castps_si128(V2) );
- int iTemp = _mm_movemask_ps(_mm_castsi128_ps(V));
- uint32_t CR = 0;
- if (iTemp==0x0F)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTemp)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- return _mm_castsi128_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorNearEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- FXMVECTOR Epsilon
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float fDeltax = V1.vector4_f32[0]-V2.vector4_f32[0];
- float fDeltay = V1.vector4_f32[1]-V2.vector4_f32[1];
- float fDeltaz = V1.vector4_f32[2]-V2.vector4_f32[2];
- float fDeltaw = V1.vector4_f32[3]-V2.vector4_f32[3];
- fDeltax = fabsf(fDeltax);
- fDeltay = fabsf(fDeltay);
- fDeltaz = fabsf(fDeltaz);
- fDeltaw = fabsf(fDeltaw);
- XMVECTORU32 Control = { { {
- (fDeltax <= Epsilon.vector4_f32[0]) ? 0xFFFFFFFFU : 0,
- (fDeltay <= Epsilon.vector4_f32[1]) ? 0xFFFFFFFFU : 0,
- (fDeltaz <= Epsilon.vector4_f32[2]) ? 0xFFFFFFFFU : 0,
- (fDeltaw <= Epsilon.vector4_f32[3]) ? 0xFFFFFFFFU : 0,
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR vDelta = vsubq_f32(V1,V2);
- return vacleq_f32( vDelta, Epsilon );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Get the difference
- XMVECTOR vDelta = _mm_sub_ps(V1,V2);
- // Get the absolute value of the difference
- XMVECTOR vTemp = _mm_setzero_ps();
- vTemp = _mm_sub_ps(vTemp,vDelta);
- vTemp = _mm_max_ps(vTemp,vDelta);
- vTemp = _mm_cmple_ps(vTemp,Epsilon);
- return vTemp;
- #endif
- }
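- // Illustrative sketch (not part of the library): XMVectorNearEqual expects a
- // per-component epsilon, so a single scalar tolerance is usually replicated
- // first. The helper name below is hypothetical.
- inline bool XM_CALLCONV XMVectorExampleNearlyEqual(FXMVECTOR V1, FXMVECTOR V2, float Tolerance)
- {
- XMVECTOR Epsilon = XMVectorReplicate(Tolerance);
- XMVECTOR Mask = XMVectorNearEqual(V1, V2, Epsilon);
- return XMVector4EqualInt(Mask, XMVectorTrueInt());
- }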
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorNotEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- (V1.vector4_f32[0] != V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[1] != V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[2] != V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[3] != V2.vector4_f32[3]) ? 0xFFFFFFFF : 0,
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vmvnq_u32(vceqq_f32(V1, V2));
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_cmpneq_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorNotEqualInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- (V1.vector4_u32[0] != V2.vector4_u32[0]) ? 0xFFFFFFFFU : 0,
- (V1.vector4_u32[1] != V2.vector4_u32[1]) ? 0xFFFFFFFFU : 0,
- (V1.vector4_u32[2] != V2.vector4_u32[2]) ? 0xFFFFFFFFU : 0,
- (V1.vector4_u32[3] != V2.vector4_u32[3]) ? 0xFFFFFFFFU : 0
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vmvnq_u32(vceqq_u32(V1, V2));
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i V = _mm_cmpeq_epi32( _mm_castps_si128(V1),_mm_castps_si128(V2) );
- return _mm_xor_ps(_mm_castsi128_ps(V),g_XMNegOneMask);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorGreater
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- (V1.vector4_f32[0] > V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[1] > V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[2] > V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[3] > V2.vector4_f32[3]) ? 0xFFFFFFFF : 0
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vcgtq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_cmpgt_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorGreaterR
- (
- uint32_t* pCR,
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- assert( pCR != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t ux = (V1.vector4_f32[0] > V2.vector4_f32[0]) ? 0xFFFFFFFFU : 0;
- uint32_t uy = (V1.vector4_f32[1] > V2.vector4_f32[1]) ? 0xFFFFFFFFU : 0;
- uint32_t uz = (V1.vector4_f32[2] > V2.vector4_f32[2]) ? 0xFFFFFFFFU : 0;
- uint32_t uw = (V1.vector4_f32[3] > V2.vector4_f32[3]) ? 0xFFFFFFFFU : 0;
- uint32_t CR = 0;
- if (ux&uy&uz&uw)
- {
- // All elements are greater
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!(ux|uy|uz|uw))
- {
- // All elements are not greater
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- XMVECTORU32 Control = { { { ux, uy, uz, uw } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgtq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1);
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFU )
- {
- // All elements are greater
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- // All elements are not greater
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpgt_ps(V1,V2);
- uint32_t CR = 0;
- int iTest = _mm_movemask_ps(vTemp);
- if (iTest==0xf)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- // All elements are not greater
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- return vTemp;
- #endif
- }
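- // Illustrative sketch (not part of the library): the "R" variants record a CR6
- // mask so callers can test all-true or all-false with the XMComparison* helpers
- // instead of re-inspecting the per-lane result. The helper name below is
- // hypothetical.
- inline bool XM_CALLCONV XMVectorExampleAllGreater(FXMVECTOR V1, FXMVECTOR V2)
- {
- uint32_t CR = 0;
- (void)XMVectorGreaterR(&CR, V1, V2);
- return XMComparisonAllTrue(CR);
- }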
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorGreaterOrEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- (V1.vector4_f32[0] >= V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[1] >= V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[2] >= V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[3] >= V2.vector4_f32[3]) ? 0xFFFFFFFF : 0
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vcgeq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_cmpge_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorGreaterOrEqualR
- (
- uint32_t* pCR,
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- assert( pCR != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t ux = (V1.vector4_f32[0] >= V2.vector4_f32[0]) ? 0xFFFFFFFFU : 0;
- uint32_t uy = (V1.vector4_f32[1] >= V2.vector4_f32[1]) ? 0xFFFFFFFFU : 0;
- uint32_t uz = (V1.vector4_f32[2] >= V2.vector4_f32[2]) ? 0xFFFFFFFFU : 0;
- uint32_t uw = (V1.vector4_f32[3] >= V2.vector4_f32[3]) ? 0xFFFFFFFFU : 0;
- uint32_t CR = 0;
- if (ux&uy&uz&uw)
- {
- // All elements are greater or equal
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!(ux|uy|uz|uw))
- {
- // All elements are not greater or equal
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- XMVECTORU32 Control = { { { ux, uy, uz, uw } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgeq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1);
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFU )
- {
- // All elements are greater or equal
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- // All elements are not greater or equal
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpge_ps(V1,V2);
- uint32_t CR = 0;
- int iTest = _mm_movemask_ps(vTemp);
- if (iTest==0xf)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- // All elements are not greater or equal
- CR = XM_CRMASK_CR6FALSE;
- }
- *pCR = CR;
- return vTemp;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorLess
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- (V1.vector4_f32[0] < V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[1] < V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[2] < V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[3] < V2.vector4_f32[3]) ? 0xFFFFFFFF : 0
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vcltq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_cmplt_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorLessOrEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- (V1.vector4_f32[0] <= V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[1] <= V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[2] <= V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
- (V1.vector4_f32[3] <= V2.vector4_f32[3]) ? 0xFFFFFFFF : 0
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vcleq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_cmple_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorInBounds
- (
- FXMVECTOR V,
- FXMVECTOR Bounds
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- (V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) ? 0xFFFFFFFF : 0,
- (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) ? 0xFFFFFFFF : 0,
- (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) ? 0xFFFFFFFF : 0,
- (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3]) ? 0xFFFFFFFF : 0
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Test if less than or equal
- XMVECTOR vTemp1 = vcleq_f32(V,Bounds);
- // Negate the bounds
- XMVECTOR vTemp2 = vnegq_f32(Bounds);
- // Test if greater or equal (Reversed)
- vTemp2 = vcleq_f32(vTemp2,V);
- // Blend answers
- vTemp1 = vandq_u32(vTemp1,vTemp2);
- return vTemp1;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Test if less than or equal
- XMVECTOR vTemp1 = _mm_cmple_ps(V,Bounds);
- // Negate the bounds
- XMVECTOR vTemp2 = _mm_mul_ps(Bounds,g_XMNegativeOne);
- // Test if greater or equal (Reversed)
- vTemp2 = _mm_cmple_ps(vTemp2,V);
- // Blend answers
- vTemp1 = _mm_and_ps(vTemp1,vTemp2);
- return vTemp1;
- #endif
- }
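- // Illustrative sketch (not part of the library): because XMVectorInBounds tests
- // -Bounds <= V <= Bounds per component, a symmetric box test for a point against
- // box extents is a single call. The helper name below is hypothetical.
- inline bool XM_CALLCONV XMVectorExampleInsideExtents(FXMVECTOR Point, FXMVECTOR Extents)
- {
- XMVECTOR Mask = XMVectorInBounds(Point, Extents);
- // Only x, y and z matter for a 3D point, so ignore the w lane of the mask.
- return XMVector3EqualInt(Mask, XMVectorTrueInt());
- }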
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline XMVECTOR XM_CALLCONV XMVectorInBoundsR
- (
- uint32_t* pCR,
- FXMVECTOR V,
- FXMVECTOR Bounds
- )
- {
- assert( pCR != nullptr );
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t ux = (V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) ? 0xFFFFFFFFU : 0;
- uint32_t uy = (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) ? 0xFFFFFFFFU : 0;
- uint32_t uz = (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) ? 0xFFFFFFFFU : 0;
- uint32_t uw = (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3]) ? 0xFFFFFFFFU : 0;
- uint32_t CR = 0;
- if (ux&uy&uz&uw)
- {
- // All elements are in bounds
- CR = XM_CRMASK_CR6BOUNDS;
- }
- *pCR = CR;
- XMVECTORU32 Control = { { { ux, uy, uz, uw } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Test if less than or equal
- XMVECTOR vTemp1 = vcleq_f32(V,Bounds);
- // Negate the bounds
- XMVECTOR vTemp2 = vnegq_f32(Bounds);
- // Test if greater or equal (Reversed)
- vTemp2 = vcleq_f32(vTemp2,V);
- // Blend answers
- vTemp1 = vandq_u32(vTemp1,vTemp2);
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vTemp1), vget_high_u8(vTemp1));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1);
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFU )
- {
- // All elements are in bounds
- CR = XM_CRMASK_CR6BOUNDS;
- }
- *pCR = CR;
- return vTemp1;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Test if less than or equal
- XMVECTOR vTemp1 = _mm_cmple_ps(V,Bounds);
- // Negate the bounds
- XMVECTOR vTemp2 = _mm_mul_ps(Bounds,g_XMNegativeOne);
- // Test if greater or equal (Reversed)
- vTemp2 = _mm_cmple_ps(vTemp2,V);
- // Blend answers
- vTemp1 = _mm_and_ps(vTemp1,vTemp2);
- uint32_t CR = 0;
- if (_mm_movemask_ps(vTemp1)==0xf) {
- // All elements are in bounds
- CR = XM_CRMASK_CR6BOUNDS;
- }
- *pCR = CR;
- return vTemp1;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorIsNaN
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- XMISNAN(V.vector4_f32[0]) ? 0xFFFFFFFFU : 0,
- XMISNAN(V.vector4_f32[1]) ? 0xFFFFFFFFU : 0,
- XMISNAN(V.vector4_f32[2]) ? 0xFFFFFFFFU : 0,
- XMISNAN(V.vector4_f32[3]) ? 0xFFFFFFFFU : 0
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Test against itself. NaN is always not equal
- uint32x4_t vTempNan = vceqq_f32( V, V );
- // Flip results
- return vmvnq_u32( vTempNan );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Test against itself. NaN is always not equal
- return _mm_cmpneq_ps(V,V);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorIsInfinite
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Control = { { {
- XMISINF(V.vector4_f32[0]) ? 0xFFFFFFFFU : 0,
- XMISINF(V.vector4_f32[1]) ? 0xFFFFFFFFU : 0,
- XMISINF(V.vector4_f32[2]) ? 0xFFFFFFFFU : 0,
- XMISINF(V.vector4_f32[3]) ? 0xFFFFFFFFU : 0
- } } };
- return Control.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Mask off the sign bit
- uint32x4_t vTemp = vandq_u32(V,g_XMAbsMask);
- // Compare to infinity
- vTemp = vceqq_f32(vTemp,g_XMInfinity);
- // Lanes that equal infinity produce an all-ones mask
- return vTemp;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Mask off the sign bit
- __m128 vTemp = _mm_and_ps(V,g_XMAbsMask);
- // Compare to infinity
- vTemp = _mm_cmpeq_ps(vTemp,g_XMInfinity);
- // Lanes that equal infinity produce an all-ones mask
- return vTemp;
- #endif
- }
- //------------------------------------------------------------------------------
- // Rounding and clamping operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorMin
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- (V1.vector4_f32[0] < V2.vector4_f32[0]) ? V1.vector4_f32[0] : V2.vector4_f32[0],
- (V1.vector4_f32[1] < V2.vector4_f32[1]) ? V1.vector4_f32[1] : V2.vector4_f32[1],
- (V1.vector4_f32[2] < V2.vector4_f32[2]) ? V1.vector4_f32[2] : V2.vector4_f32[2],
- (V1.vector4_f32[3] < V2.vector4_f32[3]) ? V1.vector4_f32[3] : V2.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vminq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_min_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorMax
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- (V1.vector4_f32[0] > V2.vector4_f32[0]) ? V1.vector4_f32[0] : V2.vector4_f32[0],
- (V1.vector4_f32[1] > V2.vector4_f32[1]) ? V1.vector4_f32[1] : V2.vector4_f32[1],
- (V1.vector4_f32[2] > V2.vector4_f32[2]) ? V1.vector4_f32[2] : V2.vector4_f32[2],
- (V1.vector4_f32[3] > V2.vector4_f32[3]) ? V1.vector4_f32[3] : V2.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vmaxq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_max_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- namespace Internal
- {
- // Round to nearest (even) a.k.a. banker's rounding
- inline float round_to_nearest( float x )
- {
- float i = floorf(x);
- x -= i;
- if(x < 0.5f)
- return i;
- if(x > 0.5f)
- return i + 1.f;
- float int_part;
- (void)modff( i / 2.f, &int_part );
- if ( (2.f*int_part) == i )
- {
- return i;
- }
- return i + 1.f;
- }
- };
- #if !defined(_XM_NO_INTRINSICS_) && !defined(__clang__)
- #pragma float_control(push)
- #pragma float_control(precise, on)
- #endif
- inline XMVECTOR XM_CALLCONV XMVectorRound
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- Internal::round_to_nearest(V.vector4_f32[0]),
- Internal::round_to_nearest(V.vector4_f32[1]),
- Internal::round_to_nearest(V.vector4_f32[2]),
- Internal::round_to_nearest(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- return vrndnq_f32(V);
- #else
- uint32x4_t sign = vandq_u32( V, g_XMNegativeZero );
- uint32x4_t sMagic = vorrq_u32( g_XMNoFraction, sign );
- float32x4_t R1 = vaddq_f32( V, sMagic );
- R1 = vsubq_f32( R1, sMagic );
- float32x4_t R2 = vabsq_f32( V );
- uint32x4_t mask = vcleq_f32( R2, g_XMNoFraction );
- XMVECTOR vResult = vbslq_f32( mask, R1, V );
- return vResult;
- #endif
- #elif defined(_XM_SSE4_INTRINSICS_)
- return _mm_round_ps( V, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128 sign = _mm_and_ps( V, g_XMNegativeZero );
- __m128 sMagic = _mm_or_ps( g_XMNoFraction, sign );
- __m128 R1 = _mm_add_ps( V, sMagic );
- R1 = _mm_sub_ps( R1, sMagic );
- __m128 R2 = _mm_and_ps( V, g_XMAbsMask );
- __m128 mask = _mm_cmple_ps( R2, g_XMNoFraction );
- R2 = _mm_andnot_ps(mask,V);
- R1 = _mm_and_ps(R1,mask);
- XMVECTOR vResult = _mm_xor_ps(R1, R2);
- return vResult;
- #endif
- }
- #if !defined(_XM_NO_INTRINSICS_) && !defined(__clang__)
- #pragma float_control(pop)
- #endif
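- // Illustrative sketch (not part of the library): XMVectorRound rounds halfway
- // cases to the nearest even integer, matching round_to_nearest above. The helper
- // name below is hypothetical and the results assume round-to-nearest-even.
- inline XMVECTOR XM_CALLCONV XMVectorExampleBankersRound()
- {
- XMVECTOR V = XMVectorSet(0.5f, 1.5f, 2.5f, -2.5f);
- // Yields approximately (0.0, 2.0, 2.0, -2.0); the halfway cases go to even.
- return XMVectorRound(V);
- }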
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorTruncate
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- uint32_t i;
- // Avoid warning C4701 (potentially uninitialized local variable)
- Result.vector4_f32[0] = 0.0f;
- for (i = 0; i < 4; i++)
- {
- if (XMISNAN(V.vector4_f32[i]))
- {
- Result.vector4_u32[i] = 0x7FC00000;
- }
- else if (fabsf(V.vector4_f32[i]) < 8388608.0f)
- {
- Result.vector4_f32[i] = (float)((int32_t)V.vector4_f32[i]);
- }
- else
- {
- Result.vector4_f32[i] = V.vector4_f32[i];
- }
- }
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- return vrndq_f32(V);
- #else
- float32x4_t vTest = vabsq_f32( V );
- vTest = vcltq_f32( vTest, g_XMNoFraction );
- int32x4_t vInt = vcvtq_s32_f32( V );
- XMVECTOR vResult = vcvtq_f32_s32( vInt );
- // All numbers less than 8388608 will use the round to int
- // All others, use the ORIGINAL value
- return vbslq_f32( vTest, vResult, V );
- #endif
- #elif defined(_XM_SSE4_INTRINSICS_)
- return _mm_round_ps( V, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC );
- #elif defined(_XM_SSE_INTRINSICS_)
- // To handle NAN, INF and numbers greater than 8388608, use masking
- // Get the abs value
- __m128i vTest = _mm_and_si128(_mm_castps_si128(V),g_XMAbsMask);
- // Test for greater than 8388608 (All floats with NO fractionals, NAN and INF)
- vTest = _mm_cmplt_epi32(vTest,g_XMNoFraction);
- // Convert to int and back to float for rounding with truncation
- __m128i vInt = _mm_cvttps_epi32(V);
- // Convert back to floats
- XMVECTOR vResult = _mm_cvtepi32_ps(vInt);
- // All numbers less than 8388608 will use the round to int
- vResult = _mm_and_ps(vResult,_mm_castsi128_ps(vTest));
- // All others, use the ORIGINAL value
- vTest = _mm_andnot_si128(vTest,_mm_castps_si128(V));
- vResult = _mm_or_ps(vResult,_mm_castsi128_ps(vTest));
- return vResult;
- #endif
- }
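- // Illustrative sketch (not part of the library): the masking above means values
- // whose magnitude is at least 2^23 (which already have no fractional part), as
- // well as NaN and infinity, pass through unchanged rather than overflowing the
- // int32 conversion. The helper name below is hypothetical.
- inline XMVECTOR XM_CALLCONV XMVectorExampleTruncate()
- {
- XMVECTOR V = XMVectorSet(1.9f, -1.9f, 3.0e9f, 3.0f);
- // Yields approximately (1.0, -1.0, 3.0e9, 3.0); the large value is untouched.
- return XMVectorTruncate(V);
- }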
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorFloor
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- floorf(V.vector4_f32[0]),
- floorf(V.vector4_f32[1]),
- floorf(V.vector4_f32[2]),
- floorf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- return vrndmq_f32(V);
- #else
- float32x4_t vTest = vabsq_f32( V );
- vTest = vcltq_f32( vTest, g_XMNoFraction );
- // Truncate
- int32x4_t vInt = vcvtq_s32_f32( V );
- XMVECTOR vResult = vcvtq_f32_s32( vInt );
- XMVECTOR vLarger = vcgtq_f32( vResult, V );
- // 0 -> 0, 0xffffffff -> -1.0f
- vLarger = vcvtq_f32_s32( vLarger );
- vResult = vaddq_f32( vResult, vLarger );
- // All numbers less than 8388608 will use the round to int
- // All others, use the ORIGINAL value
- return vbslq_f32( vTest, vResult, V );
- #endif
- #elif defined(_XM_SSE4_INTRINSICS_)
- return _mm_floor_ps( V );
- #elif defined(_XM_SSE_INTRINSICS_)
- // To handle NAN, INF and numbers greater than 8388608, use masking
- __m128i vTest = _mm_and_si128(_mm_castps_si128(V),g_XMAbsMask);
- vTest = _mm_cmplt_epi32(vTest,g_XMNoFraction);
- // Truncate
- __m128i vInt = _mm_cvttps_epi32(V);
- XMVECTOR vResult = _mm_cvtepi32_ps(vInt);
- __m128 vLarger = _mm_cmpgt_ps( vResult, V );
- // 0 -> 0, 0xffffffff -> -1.0f
- vLarger = _mm_cvtepi32_ps( _mm_castps_si128( vLarger ) );
- vResult = _mm_add_ps( vResult, vLarger );
- // All numbers less than 8388608 will use the round to int
- vResult = _mm_and_ps(vResult,_mm_castsi128_ps(vTest));
- // All others, use the ORIGINAL value
- vTest = _mm_andnot_si128(vTest,_mm_castps_si128(V));
- vResult = _mm_or_ps(vResult,_mm_castsi128_ps(vTest));
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorCeiling
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- ceilf(V.vector4_f32[0]),
- ceilf(V.vector4_f32[1]),
- ceilf(V.vector4_f32[2]),
- ceilf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- return vrndpq_f32(V);
- #else
- float32x4_t vTest = vabsq_f32( V );
- vTest = vcltq_f32( vTest, g_XMNoFraction );
- // Truncate
- int32x4_t vInt = vcvtq_s32_f32( V );
- XMVECTOR vResult = vcvtq_f32_s32( vInt );
- XMVECTOR vSmaller = vcltq_f32( vResult, V );
- // 0 -> 0, 0xffffffff -> -1.0f
- vSmaller = vcvtq_f32_s32( vSmaller );
- vResult = vsubq_f32( vResult, vSmaller );
- // All numbers less than 8388608 will use the round to int
- // All others, use the ORIGINAL value
- return vbslq_f32( vTest, vResult, V );
- #endif
- #elif defined(_XM_SSE4_INTRINSICS_)
- return _mm_ceil_ps( V );
- #elif defined(_XM_SSE_INTRINSICS_)
- // To handle NAN, INF and numbers greater than 8388608, use masking
- __m128i vTest = _mm_and_si128(_mm_castps_si128(V),g_XMAbsMask);
- vTest = _mm_cmplt_epi32(vTest,g_XMNoFraction);
- // Truncate
- __m128i vInt = _mm_cvttps_epi32(V);
- XMVECTOR vResult = _mm_cvtepi32_ps(vInt);
- __m128 vSmaller = _mm_cmplt_ps( vResult, V );
- // 0 -> 0, 0xffffffff -> -1.0f
- vSmaller = _mm_cvtepi32_ps( _mm_castps_si128( vSmaller ) );
- vResult = _mm_sub_ps( vResult, vSmaller );
- // All numbers less than 8388608 will use the round to int
- vResult = _mm_and_ps(vResult,_mm_castsi128_ps(vTest));
- // All others, use the ORIGINAL value
- vTest = _mm_andnot_si128(vTest,_mm_castps_si128(V));
- vResult = _mm_or_ps(vResult,_mm_castsi128_ps(vTest));
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorClamp
- (
- FXMVECTOR V,
- FXMVECTOR Min,
- FXMVECTOR Max
- )
- {
- assert(XMVector4LessOrEqual(Min, Max));
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVectorMax(Min, V);
- Result = XMVectorMin(Max, Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR vResult;
- vResult = vmaxq_f32(Min,V);
- vResult = vminq_f32(vResult,Max);
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult;
- vResult = _mm_max_ps(Min,V);
- vResult = _mm_min_ps(vResult,Max);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSaturate
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- const XMVECTOR Zero = XMVectorZero();
- return XMVectorClamp(V, Zero, g_XMOne.v);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Set <0 to 0
- XMVECTOR vResult = vmaxq_f32(V, vdupq_n_f32(0) );
- // Set >1 to 1
- return vminq_f32(vResult, vdupq_n_f32(1.0f) );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Set <0 to 0
- XMVECTOR vResult = _mm_max_ps(V,g_XMZero);
- // Set >1 to 1
- return _mm_min_ps(vResult,g_XMOne);
- #endif
- }
- //------------------------------------------------------------------------------
- // Bitwise logical operations
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorAndInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Result = { { {
- V1.vector4_u32[0] & V2.vector4_u32[0],
- V1.vector4_u32[1] & V2.vector4_u32[1],
- V1.vector4_u32[2] & V2.vector4_u32[2],
- V1.vector4_u32[3] & V2.vector4_u32[3]
- } } };
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vandq_u32(V1,V2);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_and_ps(V1,V2);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorAndCInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Result = { { {
- V1.vector4_u32[0] & ~V2.vector4_u32[0],
- V1.vector4_u32[1] & ~V2.vector4_u32[1],
- V1.vector4_u32[2] & ~V2.vector4_u32[2],
- V1.vector4_u32[3] & ~V2.vector4_u32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vbicq_u32(V1,V2);
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i V = _mm_andnot_si128( _mm_castps_si128(V2), _mm_castps_si128(V1) );
- return _mm_castsi128_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorOrInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Result = { { {
- V1.vector4_u32[0] | V2.vector4_u32[0],
- V1.vector4_u32[1] | V2.vector4_u32[1],
- V1.vector4_u32[2] | V2.vector4_u32[2],
- V1.vector4_u32[3] | V2.vector4_u32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vorrq_u32(V1,V2);
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i V = _mm_or_si128( _mm_castps_si128(V1), _mm_castps_si128(V2) );
- return _mm_castsi128_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorNorInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Result = { { {
- ~(V1.vector4_u32[0] | V2.vector4_u32[0]),
- ~(V1.vector4_u32[1] | V2.vector4_u32[1]),
- ~(V1.vector4_u32[2] | V2.vector4_u32[2]),
- ~(V1.vector4_u32[3] | V2.vector4_u32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t Result = vorrq_u32(V1,V2);
- return vbicq_u32(g_XMNegOneMask, Result);
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i Result;
- Result = _mm_or_si128( _mm_castps_si128(V1), _mm_castps_si128(V2) );
- Result = _mm_andnot_si128( Result,g_XMNegOneMask);
- return _mm_castsi128_ps(Result);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorXorInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORU32 Result = { { {
- V1.vector4_u32[0] ^ V2.vector4_u32[0],
- V1.vector4_u32[1] ^ V2.vector4_u32[1],
- V1.vector4_u32[2] ^ V2.vector4_u32[2],
- V1.vector4_u32[3] ^ V2.vector4_u32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return veorq_u32(V1,V2);
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i V = _mm_xor_si128( _mm_castps_si128(V1), _mm_castps_si128(V2) );
- return _mm_castsi128_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- // Computation operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorNegate
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- -V.vector4_f32[0],
- -V.vector4_f32[1],
- -V.vector4_f32[2],
- -V.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vnegq_f32(V);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR Z;
- Z = _mm_setzero_ps();
- return _mm_sub_ps( Z, V );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorAdd
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- V1.vector4_f32[0] + V2.vector4_f32[0],
- V1.vector4_f32[1] + V2.vector4_f32[1],
- V1.vector4_f32[2] + V2.vector4_f32[2],
- V1.vector4_f32[3] + V2.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vaddq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_add_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSum
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result;
- Result.f[0] =
- Result.f[1] =
- Result.f[2] =
- Result.f[3] = V.vector4_f32[0] + V.vector4_f32[1] + V.vector4_f32[2] + V.vector4_f32[3];
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- XMVECTOR vTemp = vpaddq_f32(V, V);
- return vpaddq_f32(vTemp,vTemp);
- #else
- float32x2_t v1 = vget_low_f32(V);
- float32x2_t v2 = vget_high_f32(V);
- v1 = vadd_f32(v1, v2);
- v1 = vpadd_f32(v1, v1);
- return vcombine_f32(v1, v1);
- #endif
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vTemp = _mm_hadd_ps(V, V);
- return _mm_hadd_ps(vTemp,vTemp);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 3, 0, 1));
- XMVECTOR vTemp2 = _mm_add_ps(V, vTemp);
- vTemp = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(1, 0, 3, 2));
- return _mm_add_ps(vTemp, vTemp2);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorAddAngles
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- const XMVECTOR Zero = XMVectorZero();
- // Add the given angles together. If the range of V1 is such
- // that -Pi <= V1 < Pi and the range of V2 is such that
- // -2Pi <= V2 <= 2Pi, then the range of the resulting angle
- // will be -Pi <= Result < Pi.
- XMVECTOR Result = XMVectorAdd(V1, V2);
- XMVECTOR Mask = XMVectorLess(Result, g_XMNegativePi.v);
- XMVECTOR Offset = XMVectorSelect(Zero, g_XMTwoPi.v, Mask);
- Mask = XMVectorGreaterOrEqual(Result, g_XMPi.v);
- Offset = XMVectorSelect(Offset, g_XMNegativeTwoPi.v, Mask);
- Result = XMVectorAdd(Result, Offset);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Adjust the angles
- XMVECTOR vResult = vaddq_f32(V1,V2);
- // Less than -Pi?
- uint32x4_t vOffset = vcltq_f32(vResult,g_XMNegativePi);
- vOffset = vandq_u32(vOffset,g_XMTwoPi);
- // Add 2Pi to all entries less than -Pi
- vResult = vaddq_f32(vResult,vOffset);
- // Greater than or equal to Pi?
- vOffset = vcgeq_f32(vResult,g_XMPi);
- vOffset = vandq_u32(vOffset,g_XMTwoPi);
- // Subtract 2Pi from all entries greater than or equal to Pi
- vResult = vsubq_f32(vResult,vOffset);
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Adjust the angles
- XMVECTOR vResult = _mm_add_ps(V1,V2);
- // Less than -Pi?
- XMVECTOR vOffset = _mm_cmplt_ps(vResult,g_XMNegativePi);
- vOffset = _mm_and_ps(vOffset,g_XMTwoPi);
- // Add 2Pi to all entries less than -Pi
- vResult = _mm_add_ps(vResult,vOffset);
- // Greater than or equal to Pi?
- vOffset = _mm_cmpge_ps(vResult,g_XMPi);
- vOffset = _mm_and_ps(vOffset,g_XMTwoPi);
- // Subtract 2Pi from all entries greater than or equal to Pi
- vResult = _mm_sub_ps(vResult,vOffset);
- return vResult;
- #endif
- }
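- // Illustrative sketch (not part of the library): given inputs in the documented
- // ranges, the wrapped sum stays in [-Pi, Pi). The helper name below is
- // hypothetical.
- inline XMVECTOR XM_CALLCONV XMVectorExampleWrapAngleSum()
- {
- XMVECTOR A = XMVectorReplicate(3.0f * XM_PIDIV4); // 3*Pi/4, inside [-Pi, Pi)
- XMVECTOR B = XMVectorReplicate(XM_PIDIV2); // Pi/2, inside [-2Pi, 2Pi]
- // 5*Pi/4 is outside [-Pi, Pi), so the result wraps to roughly -3*Pi/4.
- return XMVectorAddAngles(A, B);
- }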
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSubtract
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- V1.vector4_f32[0] - V2.vector4_f32[0],
- V1.vector4_f32[1] - V2.vector4_f32[1],
- V1.vector4_f32[2] - V2.vector4_f32[2],
- V1.vector4_f32[3] - V2.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vsubq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_sub_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSubtractAngles
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- const XMVECTOR Zero = XMVectorZero();
- // Subtract the given angles. If the range of V1 is such
- // that -Pi <= V1 < Pi and the range of V2 is such that
- // -2Pi <= V2 <= 2Pi, then the range of the resulting angle
- // will be -Pi <= Result < Pi.
- XMVECTOR Result = XMVectorSubtract(V1, V2);
- XMVECTOR Mask = XMVectorLess(Result, g_XMNegativePi.v);
- XMVECTOR Offset = XMVectorSelect(Zero, g_XMTwoPi.v, Mask);
- Mask = XMVectorGreaterOrEqual(Result, g_XMPi.v);
- Offset = XMVectorSelect(Offset, g_XMNegativeTwoPi.v, Mask);
- Result = XMVectorAdd(Result, Offset);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Adjust the angles
- XMVECTOR vResult = vsubq_f32(V1,V2);
- // Less than -Pi?
- uint32x4_t vOffset = vcltq_f32(vResult,g_XMNegativePi);
- vOffset = vandq_u32(vOffset,g_XMTwoPi);
- // Add 2Pi to all entries less than -Pi
- vResult = vaddq_f32(vResult,vOffset);
- // Greater than or equal to Pi?
- vOffset = vcgeq_f32(vResult,g_XMPi);
- vOffset = vandq_u32(vOffset,g_XMTwoPi);
- // Subtract 2Pi from all entries greater than or equal to Pi
- vResult = vsubq_f32(vResult,vOffset);
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Adjust the angles
- XMVECTOR vResult = _mm_sub_ps(V1,V2);
- // Less than -Pi?
- XMVECTOR vOffset = _mm_cmplt_ps(vResult,g_XMNegativePi);
- vOffset = _mm_and_ps(vOffset,g_XMTwoPi);
- // Add 2Pi to all entries less than -Pi
- vResult = _mm_add_ps(vResult,vOffset);
- // Greater than or equal to Pi?
- vOffset = _mm_cmpge_ps(vResult,g_XMPi);
- vOffset = _mm_and_ps(vOffset,g_XMTwoPi);
- // Subtract 2Pi from all entries greater than or equal to Pi
- vResult = _mm_sub_ps(vResult,vOffset);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorMultiply
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- V1.vector4_f32[0] * V2.vector4_f32[0],
- V1.vector4_f32[1] * V2.vector4_f32[1],
- V1.vector4_f32[2] * V2.vector4_f32[2],
- V1.vector4_f32[3] * V2.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vmulq_f32( V1, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_mul_ps( V1, V2 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorMultiplyAdd
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- FXMVECTOR V3
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- V1.vector4_f32[0] * V2.vector4_f32[0] + V3.vector4_f32[0],
- V1.vector4_f32[1] * V2.vector4_f32[1] + V3.vector4_f32[1],
- V1.vector4_f32[2] * V2.vector4_f32[2] + V3.vector4_f32[2],
- V1.vector4_f32[3] * V2.vector4_f32[3] + V3.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vmlaq_f32( V3, V1, V2 );
- #elif defined(_XM_FMA3_INTRINSICS_)
- return _mm_fmadd_ps( V1, V2, V3 );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = _mm_mul_ps( V1, V2 );
- return _mm_add_ps(vResult, V3 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorDivide
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- V1.vector4_f32[0] / V2.vector4_f32[0],
- V1.vector4_f32[1] / V2.vector4_f32[1],
- V1.vector4_f32[2] / V2.vector4_f32[2],
- V1.vector4_f32[3] / V2.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- return vdivq_f32( V1, V2 );
- #else
- // 2 iterations of Newton-Raphson refinement of reciprocal
- float32x4_t Reciprocal = vrecpeq_f32(V2);
- float32x4_t S = vrecpsq_f32( Reciprocal, V2 );
- Reciprocal = vmulq_f32( S, Reciprocal );
- S = vrecpsq_f32( Reciprocal, V2 );
- Reciprocal = vmulq_f32( S, Reciprocal );
- return vmulq_f32( V1, Reciprocal );
- #endif
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_div_ps( V1, V2 );
- #endif
- }
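- // Illustrative sketch (not part of the library): each vrecpsq_f32/vmulq_f32 pair
- // in the non-A64 NEON path above is one Newton-Raphson step for the reciprocal,
- // x1 = x0 * (2 - d * x0). A scalar equivalent of the two refinement steps:
- inline float XMScalarExampleRefineReciprocal(float d, float estimate)
- {
- float x = estimate; // assumed to be a rough estimate of 1/d
- x = x * (2.0f - d * x); // first refinement step
- x = x * (2.0f - d * x); // second refinement step
- return x; // close to 1/d for a reasonable starting estimate
- }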
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorNegativeMultiplySubtract
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- FXMVECTOR V3
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- V3.vector4_f32[0] - (V1.vector4_f32[0] * V2.vector4_f32[0]),
- V3.vector4_f32[1] - (V1.vector4_f32[1] * V2.vector4_f32[1]),
- V3.vector4_f32[2] - (V1.vector4_f32[2] * V2.vector4_f32[2]),
- V3.vector4_f32[3] - (V1.vector4_f32[3] * V2.vector4_f32[3])
- } } };
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vmlsq_f32( V3, V1, V2 );
- #elif defined(_XM_FMA3_INTRINSICS_)
- return _mm_fnmadd_ps(V1, V2, V3);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR R = _mm_mul_ps( V1, V2 );
- return _mm_sub_ps( V3, R );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorScale
- (
- FXMVECTOR V,
- float ScaleFactor
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- V.vector4_f32[0] * ScaleFactor,
- V.vector4_f32[1] * ScaleFactor,
- V.vector4_f32[2] * ScaleFactor,
- V.vector4_f32[3] * ScaleFactor
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vmulq_n_f32( V, ScaleFactor );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = _mm_set_ps1(ScaleFactor);
- return _mm_mul_ps(vResult,V);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorReciprocalEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- 1.f / V.vector4_f32[0],
- 1.f / V.vector4_f32[1],
- 1.f / V.vector4_f32[2],
- 1.f / V.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vrecpeq_f32(V);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_rcp_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorReciprocal
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- 1.f / V.vector4_f32[0],
- 1.f / V.vector4_f32[1],
- 1.f / V.vector4_f32[2],
- 1.f / V.vector4_f32[3]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- float32x4_t one = vdupq_n_f32(1.0f);
- return vdivq_f32(one,V);
- #else
- // 2 iterations of Newton-Raphson refinement
- float32x4_t Reciprocal = vrecpeq_f32(V);
- float32x4_t S = vrecpsq_f32( Reciprocal, V );
- Reciprocal = vmulq_f32( S, Reciprocal );
- S = vrecpsq_f32( Reciprocal, V );
- return vmulq_f32( S, Reciprocal );
- #endif
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_div_ps(g_XMOne,V);
- #endif
- }
- //------------------------------------------------------------------------------
- // Return an estimated square root
- inline XMVECTOR XM_CALLCONV XMVectorSqrtEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- sqrtf(V.vector4_f32[0]),
- sqrtf(V.vector4_f32[1]),
- sqrtf(V.vector4_f32[2]),
- sqrtf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // 1 iteration of Newton-Raphson refinement of sqrt
- float32x4_t S0 = vrsqrteq_f32(V);
- float32x4_t P0 = vmulq_f32( V, S0 );
- float32x4_t R0 = vrsqrtsq_f32( P0, S0 );
- float32x4_t S1 = vmulq_f32( S0, R0 );
- XMVECTOR VEqualsInfinity = XMVectorEqualInt(V, g_XMInfinity.v);
- XMVECTOR VEqualsZero = XMVectorEqual(V, vdupq_n_f32(0) );
- XMVECTOR Result = vmulq_f32( V, S1 );
- XMVECTOR Select = XMVectorEqualInt(VEqualsInfinity, VEqualsZero);
- return XMVectorSelect(V, Result, Select);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_sqrt_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSqrt
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- sqrtf(V.vector4_f32[0]),
- sqrtf(V.vector4_f32[1]),
- sqrtf(V.vector4_f32[2]),
- sqrtf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // 3 iterations of Newton-Raphson refinement of sqrt
- float32x4_t S0 = vrsqrteq_f32(V);
- float32x4_t P0 = vmulq_f32( V, S0 );
- float32x4_t R0 = vrsqrtsq_f32( P0, S0 );
- float32x4_t S1 = vmulq_f32( S0, R0 );
- float32x4_t P1 = vmulq_f32( V, S1 );
- float32x4_t R1 = vrsqrtsq_f32( P1, S1 );
- float32x4_t S2 = vmulq_f32( S1, R1 );
- float32x4_t P2 = vmulq_f32( V, S2 );
- float32x4_t R2 = vrsqrtsq_f32( P2, S2 );
- float32x4_t S3 = vmulq_f32( S2, R2 );
- XMVECTOR VEqualsInfinity = XMVectorEqualInt(V, g_XMInfinity.v);
- XMVECTOR VEqualsZero = XMVectorEqual(V, vdupq_n_f32(0) );
- XMVECTOR Result = vmulq_f32( V, S3 );
- XMVECTOR Select = XMVectorEqualInt(VEqualsInfinity, VEqualsZero);
- return XMVectorSelect(V, Result, Select);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_sqrt_ps(V);
- #endif
- }
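- // Illustrative sketch (not part of the library): each vrsqrtsq_f32/vmulq_f32 pair
- // above is one Newton-Raphson step for the reciprocal square root,
- // s1 = s0 * (3 - v * s0 * s0) / 2, and sqrt(v) is then recovered as v * s3. The
- // final XMVectorSelect keeps the original V for the v == 0 and v == +infinity
- // lanes, where v * s3 would otherwise be NaN. A scalar sketch of one step:
- inline float XMScalarExampleRefineReciprocalSqrt(float v, float s)
- {
- // s is assumed to approximate 1/sqrt(v) for a finite, non-zero v.
- return s * (3.0f - v * s * s) * 0.5f;
- }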
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorReciprocalSqrtEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- 1.f / sqrtf(V.vector4_f32[0]),
- 1.f / sqrtf(V.vector4_f32[1]),
- 1.f / sqrtf(V.vector4_f32[2]),
- 1.f / sqrtf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vrsqrteq_f32(V);
- #elif defined(_XM_SSE_INTRINSICS_)
- return _mm_rsqrt_ps(V);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorReciprocalSqrt
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- 1.f / sqrtf(V.vector4_f32[0]),
- 1.f / sqrtf(V.vector4_f32[1]),
- 1.f / sqrtf(V.vector4_f32[2]),
- 1.f / sqrtf(V.vector4_f32[3])
- } } };
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // 2 iterations of Newton-Raphson refinement of the reciprocal square root
- float32x4_t S0 = vrsqrteq_f32(V);
- float32x4_t P0 = vmulq_f32( V, S0 );
- float32x4_t R0 = vrsqrtsq_f32( P0, S0 );
- float32x4_t S1 = vmulq_f32( S0, R0 );
- float32x4_t P1 = vmulq_f32( V, S1 );
- float32x4_t R1 = vrsqrtsq_f32( P1, S1 );
- return vmulq_f32( S1, R1 );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = _mm_sqrt_ps(V);
- vResult = _mm_div_ps(g_XMOne,vResult);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorExp2
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- powf(2.0f, V.vector4_f32[0]),
- powf(2.0f, V.vector4_f32[1]),
- powf(2.0f, V.vector4_f32[2]),
- powf(2.0f, V.vector4_f32[3])
- } } };
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- int32x4_t itrunc = vcvtq_s32_f32(V);
- float32x4_t ftrunc = vcvtq_f32_s32(itrunc);
- float32x4_t y = vsubq_f32(V, ftrunc);
- float32x4_t poly = vmlaq_f32( g_XMExpEst6, g_XMExpEst7, y );
- poly = vmlaq_f32( g_XMExpEst5, poly, y );
- poly = vmlaq_f32( g_XMExpEst4, poly, y );
- poly = vmlaq_f32( g_XMExpEst3, poly, y );
- poly = vmlaq_f32( g_XMExpEst2, poly, y );
- poly = vmlaq_f32( g_XMExpEst1, poly, y );
- poly = vmlaq_f32( g_XMOne, poly, y );
- int32x4_t biased = vaddq_s32(itrunc, g_XMExponentBias);
- biased = vshlq_n_s32(biased, 23);
- float32x4_t result0 = XMVectorDivide(biased, poly);
- biased = vaddq_s32(itrunc, g_XM253);
- biased = vshlq_n_s32(biased, 23);
- float32x4_t result1 = XMVectorDivide(biased, poly);
- result1 = vmulq_f32(g_XMMinNormal.v, result1);
- // Use selection to handle the cases
- // if (V is NaN) -> QNaN;
- // else if (V sign bit set)
- // if (V > -150)
- // if (V.exponent < -126) -> result1
- // else -> result0
- // else -> +0
- // else
- // if (V < 128) -> result0
- // else -> +inf
- int32x4_t comp = vcltq_s32( V, g_XMBin128);
- float32x4_t result2 = vbslq_f32( comp, result0, g_XMInfinity );
- comp = vcltq_s32(itrunc, g_XMSubnormalExponent);
- float32x4_t result3 = vbslq_f32( comp, result1, result0 );
- comp = vcltq_s32(V, g_XMBinNeg150);
- float32x4_t result4 = vbslq_f32( comp, result3, g_XMZero );
- int32x4_t sign = vandq_s32(V, g_XMNegativeZero);
- comp = vceqq_s32(sign, g_XMNegativeZero);
- float32x4_t result5 = vbslq_f32( comp, result4, result2 );
- int32x4_t t0 = vandq_s32(V, g_XMQNaNTest);
- int32x4_t t1 = vandq_s32(V, g_XMInfinity);
- t0 = vceqq_s32(t0, g_XMZero);
- t1 = vceqq_s32(t1, g_XMInfinity);
- int32x4_t isNaN = vbicq_s32( t1,t0);
- float32x4_t vResult = vbslq_f32( isNaN, g_XMQNaN, result5 );
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i itrunc = _mm_cvttps_epi32(V);
- __m128 ftrunc = _mm_cvtepi32_ps(itrunc);
- __m128 y = _mm_sub_ps(V, ftrunc);
- __m128 poly = _mm_mul_ps(g_XMExpEst7, y);
- poly = _mm_add_ps(g_XMExpEst6, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst5, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst4, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst3, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst2, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst1, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMOne, poly);
- __m128i biased = _mm_add_epi32(itrunc, g_XMExponentBias);
- biased = _mm_slli_epi32(biased, 23);
- __m128 result0 = _mm_div_ps(_mm_castsi128_ps(biased), poly);
- biased = _mm_add_epi32(itrunc, g_XM253);
- biased = _mm_slli_epi32(biased, 23);
- __m128 result1 = _mm_div_ps(_mm_castsi128_ps(biased), poly);
- result1 = _mm_mul_ps(g_XMMinNormal.v, result1);
- // Use selection to handle the cases
- // if (V is NaN) -> QNaN;
- // else if (V sign bit set)
- // if (V > -150)
- // if (V.exponent < -126) -> result1
- // else -> result0
- // else -> +0
- // else
- // if (V < 128) -> result0
- // else -> +inf
- __m128i comp = _mm_cmplt_epi32( _mm_castps_si128(V), g_XMBin128);
- __m128i select0 = _mm_and_si128(comp, _mm_castps_si128(result0));
- __m128i select1 = _mm_andnot_si128(comp, g_XMInfinity);
- __m128i result2 = _mm_or_si128(select0, select1);
- comp = _mm_cmplt_epi32(itrunc, g_XMSubnormalExponent);
- select1 = _mm_and_si128(comp, _mm_castps_si128(result1));
- select0 = _mm_andnot_si128(comp, _mm_castps_si128(result0));
- __m128i result3 = _mm_or_si128(select0, select1);
- comp = _mm_cmplt_epi32(_mm_castps_si128(V), g_XMBinNeg150);
- select0 = _mm_and_si128(comp, result3);
- select1 = _mm_andnot_si128(comp, g_XMZero);
- __m128i result4 = _mm_or_si128(select0, select1);
- __m128i sign = _mm_and_si128(_mm_castps_si128(V), g_XMNegativeZero);
- comp = _mm_cmpeq_epi32(sign, g_XMNegativeZero);
- select0 = _mm_and_si128(comp, result4);
- select1 = _mm_andnot_si128(comp, result2);
- __m128i result5 = _mm_or_si128(select0, select1);
- __m128i t0 = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest);
- __m128i t1 = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity);
- t0 = _mm_cmpeq_epi32(t0, g_XMZero);
- t1 = _mm_cmpeq_epi32(t1, g_XMInfinity);
- __m128i isNaN = _mm_andnot_si128(t0, t1);
- select0 = _mm_and_si128(isNaN, g_XMQNaN);
- select1 = _mm_andnot_si128(isNaN, result5);
- __m128i vResult = _mm_or_si128(select0, select1);
- return _mm_castsi128_ps(vResult);
- #endif
- }
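- // Illustrative sketch (not part of the library): the selection logic documented
- // above yields these special-case results. The helper name below is hypothetical.
- inline XMVECTOR XM_CALLCONV XMVectorExampleExp2SpecialCases()
- {
- XMVECTOR V = XMVectorSet(3.0f, 200.0f, -200.0f, -0.5f);
- // Yields approximately (8.0, +infinity, +0.0, 0.70711); NaN inputs produce QNaN.
- return XMVectorExp2(V);
- }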
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorExpE
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- expf(V.vector4_f32[0]),
- expf(V.vector4_f32[1]),
- expf(V.vector4_f32[2]),
- expf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // expE(V) = exp2(V * log2(e))
- float32x4_t Ve = vmulq_f32(g_XMLgE, V);
- int32x4_t itrunc = vcvtq_s32_f32(Ve);
- float32x4_t ftrunc = vcvtq_f32_s32(itrunc);
- float32x4_t y = vsubq_f32(Ve, ftrunc);
- float32x4_t poly = vmlaq_f32( g_XMExpEst6, g_XMExpEst7, y );
- poly = vmlaq_f32( g_XMExpEst5, poly, y );
- poly = vmlaq_f32( g_XMExpEst4, poly, y );
- poly = vmlaq_f32( g_XMExpEst3, poly, y );
- poly = vmlaq_f32( g_XMExpEst2, poly, y );
- poly = vmlaq_f32( g_XMExpEst1, poly, y );
- poly = vmlaq_f32( g_XMOne, poly, y );
- int32x4_t biased = vaddq_s32(itrunc, g_XMExponentBias);
- biased = vshlq_n_s32(biased, 23);
- float32x4_t result0 = XMVectorDivide(biased, poly);
- biased = vaddq_s32(itrunc, g_XM253);
- biased = vshlq_n_s32(biased, 23);
- float32x4_t result1 = XMVectorDivide(biased, poly);
- result1 = vmulq_f32(g_XMMinNormal.v, result1);
- // Use selection to handle the cases
- // if (V is NaN) -> QNaN;
- // else if (V sign bit set)
- // if (V > -150)
- // if (V.exponent < -126) -> result1
- // else -> result0
- // else -> +0
- // else
- // if (V < 128) -> result0
- // else -> +inf
- int32x4_t comp = vcltq_s32( Ve, g_XMBin128);
- float32x4_t result2 = vbslq_f32( comp, result0, g_XMInfinity );
- comp = vcltq_s32(itrunc, g_XMSubnormalExponent);
- float32x4_t result3 = vbslq_f32( comp, result1, result0 );
- comp = vcltq_s32(Ve, g_XMBinNeg150);
- float32x4_t result4 = vbslq_f32( comp, result3, g_XMZero );
- int32x4_t sign = vandq_s32(Ve, g_XMNegativeZero);
- comp = vceqq_s32(sign, g_XMNegativeZero);
- float32x4_t result5 = vbslq_f32( comp, result4, result2 );
- int32x4_t t0 = vandq_s32(Ve, g_XMQNaNTest);
- int32x4_t t1 = vandq_s32(Ve, g_XMInfinity);
- t0 = vceqq_s32(t0, g_XMZero);
- t1 = vceqq_s32(t1, g_XMInfinity);
- int32x4_t isNaN = vbicq_s32( t1,t0);
- float32x4_t vResult = vbslq_f32( isNaN, g_XMQNaN, result5 );
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // expE(V) = exp2(V * log2(e))
- __m128 Ve = _mm_mul_ps(g_XMLgE, V);
- __m128i itrunc = _mm_cvttps_epi32(Ve);
- __m128 ftrunc = _mm_cvtepi32_ps(itrunc);
- __m128 y = _mm_sub_ps(Ve, ftrunc);
- __m128 poly = _mm_mul_ps(g_XMExpEst7, y);
- poly = _mm_add_ps(g_XMExpEst6, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst5, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst4, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst3, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst2, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMExpEst1, poly);
- poly = _mm_mul_ps(poly, y);
- poly = _mm_add_ps(g_XMOne, poly);
- __m128i biased = _mm_add_epi32(itrunc, g_XMExponentBias);
- biased = _mm_slli_epi32(biased, 23);
- __m128 result0 = _mm_div_ps(_mm_castsi128_ps(biased), poly);
- biased = _mm_add_epi32(itrunc, g_XM253);
- biased = _mm_slli_epi32(biased, 23);
- __m128 result1 = _mm_div_ps(_mm_castsi128_ps(biased), poly);
- result1 = _mm_mul_ps(g_XMMinNormal.v, result1);
- // Use selection to handle the cases
- // if (V is NaN) -> QNaN;
- // else if (V sign bit set)
- // if (V > -150)
- // if (V.exponent < -126) -> result1
- // else -> result0
- // else -> +0
- // else
- // if (V < 128) -> result0
- // else -> +inf
- __m128i comp = _mm_cmplt_epi32( _mm_castps_si128(Ve), g_XMBin128);
- __m128i select0 = _mm_and_si128(comp, _mm_castps_si128(result0));
- __m128i select1 = _mm_andnot_si128(comp, g_XMInfinity);
- __m128i result2 = _mm_or_si128(select0, select1);
- comp = _mm_cmplt_epi32(itrunc, g_XMSubnormalExponent);
- select1 = _mm_and_si128(comp, _mm_castps_si128(result1));
- select0 = _mm_andnot_si128(comp, _mm_castps_si128(result0));
- __m128i result3 = _mm_or_si128(select0, select1);
- comp = _mm_cmplt_epi32(_mm_castps_si128(Ve), g_XMBinNeg150);
- select0 = _mm_and_si128(comp, result3);
- select1 = _mm_andnot_si128(comp, g_XMZero);
- __m128i result4 = _mm_or_si128(select0, select1);
- __m128i sign = _mm_and_si128(_mm_castps_si128(Ve), g_XMNegativeZero);
- comp = _mm_cmpeq_epi32(sign, g_XMNegativeZero);
- select0 = _mm_and_si128(comp, result4);
- select1 = _mm_andnot_si128(comp, result2);
- __m128i result5 = _mm_or_si128(select0, select1);
- __m128i t0 = _mm_and_si128(_mm_castps_si128(Ve), g_XMQNaNTest);
- __m128i t1 = _mm_and_si128(_mm_castps_si128(Ve), g_XMInfinity);
- t0 = _mm_cmpeq_epi32(t0, g_XMZero);
- t1 = _mm_cmpeq_epi32(t1, g_XMInfinity);
- __m128i isNaN = _mm_andnot_si128(t0, t1);
- select0 = _mm_and_si128(isNaN, g_XMQNaN);
- select1 = _mm_andnot_si128(isNaN, result5);
- __m128i vResult = _mm_or_si128(select0, select1);
- return _mm_castsi128_ps(vResult);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorExp
- (
- FXMVECTOR V
- )
- {
- return XMVectorExp2(V);
- }
- //------------------------------------------------------------------------------
- #if defined(_XM_SSE_INTRINSICS_)
- namespace Internal
- {
- inline __m128i multi_sll_epi32(__m128i value, __m128i count)
- {
- __m128i v = _mm_shuffle_epi32(value, _MM_SHUFFLE(0,0,0,0));
- __m128i c = _mm_shuffle_epi32(count, _MM_SHUFFLE(0,0,0,0));
- c = _mm_and_si128(c, g_XMMaskX);
- __m128i r0 = _mm_sll_epi32(v, c);
- v = _mm_shuffle_epi32(value, _MM_SHUFFLE(1,1,1,1));
- c = _mm_shuffle_epi32(count, _MM_SHUFFLE(1,1,1,1));
- c = _mm_and_si128(c, g_XMMaskX);
- __m128i r1 = _mm_sll_epi32(v, c);
- v = _mm_shuffle_epi32(value, _MM_SHUFFLE(2,2,2,2));
- c = _mm_shuffle_epi32(count, _MM_SHUFFLE(2,2,2,2));
- c = _mm_and_si128(c, g_XMMaskX);
- __m128i r2 = _mm_sll_epi32(v, c);
- v = _mm_shuffle_epi32(value, _MM_SHUFFLE(3,3,3,3));
- c = _mm_shuffle_epi32(count, _MM_SHUFFLE(3,3,3,3));
- c = _mm_and_si128(c, g_XMMaskX);
- __m128i r3 = _mm_sll_epi32(v, c);
- // (r0,r0,r1,r1)
- __m128 r01 = _mm_shuffle_ps(_mm_castsi128_ps(r0), _mm_castsi128_ps(r1), _MM_SHUFFLE(0,0,0,0));
- // (r2,r2,r3,r3)
- __m128 r23 = _mm_shuffle_ps(_mm_castsi128_ps(r2), _mm_castsi128_ps(r3), _MM_SHUFFLE(0,0,0,0));
- // (r0,r1,r2,r3)
- __m128 result = _mm_shuffle_ps(r01, r23, _MM_SHUFFLE(2,0,2,0));
- return _mm_castps_si128(result);
- }
- inline __m128i multi_srl_epi32(__m128i value, __m128i count)
- {
- __m128i v = _mm_shuffle_epi32(value, _MM_SHUFFLE(0,0,0,0));
- __m128i c = _mm_shuffle_epi32(count, _MM_SHUFFLE(0,0,0,0));
- c = _mm_and_si128(c, g_XMMaskX);
- __m128i r0 = _mm_srl_epi32(v, c);
- v = _mm_shuffle_epi32(value, _MM_SHUFFLE(1,1,1,1));
- c = _mm_shuffle_epi32(count, _MM_SHUFFLE(1,1,1,1));
- c = _mm_and_si128(c, g_XMMaskX);
- __m128i r1 = _mm_srl_epi32(v, c);
- v = _mm_shuffle_epi32(value, _MM_SHUFFLE(2,2,2,2));
- c = _mm_shuffle_epi32(count, _MM_SHUFFLE(2,2,2,2));
- c = _mm_and_si128(c, g_XMMaskX);
- __m128i r2 = _mm_srl_epi32(v, c);
- v = _mm_shuffle_epi32(value, _MM_SHUFFLE(3,3,3,3));
- c = _mm_shuffle_epi32(count, _MM_SHUFFLE(3,3,3,3));
- c = _mm_and_si128(c, g_XMMaskX);
- __m128i r3 = _mm_srl_epi32(v, c);
- // (r0,r0,r1,r1)
- __m128 r01 = _mm_shuffle_ps(_mm_castsi128_ps(r0), _mm_castsi128_ps(r1), _MM_SHUFFLE(0,0,0,0));
- // (r2,r2,r3,r3)
- __m128 r23 = _mm_shuffle_ps(_mm_castsi128_ps(r2), _mm_castsi128_ps(r3), _MM_SHUFFLE(0,0,0,0));
- // (r0,r1,r2,r3)
- __m128 result = _mm_shuffle_ps(r01, r23, _MM_SHUFFLE(2,0,2,0));
- return _mm_castps_si128(result);
- }
- inline __m128i GetLeadingBit(const __m128i value)
- {
- static const XMVECTORI32 g_XM0000FFFF = { { { 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF } } };
- static const XMVECTORI32 g_XM000000FF = { { { 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF } } };
- static const XMVECTORI32 g_XM0000000F = { { { 0x0000000F, 0x0000000F, 0x0000000F, 0x0000000F } } };
- static const XMVECTORI32 g_XM00000003 = { { { 0x00000003, 0x00000003, 0x00000003, 0x00000003 } } };
- __m128i v = value, r, c, b, s;
- c = _mm_cmpgt_epi32(v, g_XM0000FFFF); // c = (v > 0xFFFF)
- b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0)
- r = _mm_slli_epi32(b, 4); // r = (b << 4)
- v = multi_srl_epi32(v, r); // v = (v >> r)
- c = _mm_cmpgt_epi32(v, g_XM000000FF); // c = (v > 0xFF)
- b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0)
- s = _mm_slli_epi32(b, 3); // s = (b << 3)
- v = multi_srl_epi32(v, s); // v = (v >> s)
- r = _mm_or_si128(r, s); // r = (r | s)
- c = _mm_cmpgt_epi32(v, g_XM0000000F); // c = (v > 0xF)
- b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0)
- s = _mm_slli_epi32(b, 2); // s = (b << 2)
- v = multi_srl_epi32(v, s); // v = (v >> s)
- r = _mm_or_si128(r, s); // r = (r | s)
- c = _mm_cmpgt_epi32(v, g_XM00000003); // c = (v > 0x3)
- b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0)
- s = _mm_slli_epi32(b, 1); // s = (b << 1)
- v = multi_srl_epi32(v, s); // v = (v >> s)
- r = _mm_or_si128(r, s); // r = (r | s)
- s = _mm_srli_epi32(v, 1);
- r = _mm_or_si128(r, s);
- return r;
- }
- } // namespace Internal
- #endif // _XM_SSE_INTRINSICS_
- #if defined(_XM_ARM_NEON_INTRINSICS_)
- namespace Internal
- {
- inline int32x4_t GetLeadingBit(const int32x4_t value)
- {
- static const XMVECTORI32 g_XM0000FFFF = { { { 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF } } };
- static const XMVECTORI32 g_XM000000FF = { { { 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF } } };
- static const XMVECTORI32 g_XM0000000F = { { { 0x0000000F, 0x0000000F, 0x0000000F, 0x0000000F } } };
- static const XMVECTORI32 g_XM00000003 = { { { 0x00000003, 0x00000003, 0x00000003, 0x00000003 } } };
- int32x4_t v = value, r, c, b, s;
- c = vcgtq_s32(v, g_XM0000FFFF); // c = (v > 0xFFFF)
- b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0)
- r = vshlq_n_s32(b, 4); // r = (b << 4)
- r = vnegq_s32( r );
- v = vshlq_u32( v, r ); // v = (v >> r)
-
- c = vcgtq_s32(v, g_XM000000FF); // c = (v > 0xFF)
- b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0)
- s = vshlq_n_s32(b, 3); // s = (b << 3)
- s = vnegq_s32( s );
- v = vshlq_u32(v, s); // v = (v >> s)
- r = vorrq_s32(r, s); // r = (r | s)
- c = vcgtq_s32(v, g_XM0000000F); // c = (v > 0xF)
- b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0)
- s = vshlq_n_s32(b, 2); // s = (b << 2)
- s = vnegq_s32( s );
- v = vshlq_u32(v, s); // v = (v >> s)
- r = vorrq_s32(r, s); // r = (r | s)
- c = vcgtq_s32(v, g_XM00000003); // c = (v > 0x3)
- b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0)
- s = vshlq_n_s32(b, 1); // s = (b << 1)
- s = vnegq_s32( s );
- v = vshlq_u32(v, s); // v = (v >> s)
- r = vorrq_s32(r, s); // r = (r | s)
- s = vshrq_n_u32(v, 1);
- r = vorrq_s32(r, s);
- return r;
- }
- } // namespace Internal
- #endif
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorLog2
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- const float fScale = 1.4426950f; // (1.0f / logf(2.0f));
- XMVECTORF32 Result = { { {
- logf(V.vector4_f32[0])*fScale,
- logf(V.vector4_f32[1])*fScale,
- logf(V.vector4_f32[2])*fScale,
- logf(V.vector4_f32[3])*fScale
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- int32x4_t rawBiased = vandq_s32(V, g_XMInfinity);
- int32x4_t trailing = vandq_s32(V, g_XMQNaNTest);
- int32x4_t isExponentZero = vceqq_s32(g_XMZero, rawBiased);
- // Compute exponent and significand for normals.
- int32x4_t biased = vshrq_n_u32(rawBiased, 23);
- int32x4_t exponentNor = vsubq_s32(biased, g_XMExponentBias);
- int32x4_t trailingNor = trailing;
- // Compute exponent and significand for subnormals.
- int32x4_t leading = Internal::GetLeadingBit(trailing);
- int32x4_t shift = vsubq_s32(g_XMNumTrailing, leading);
- int32x4_t exponentSub = vsubq_s32(g_XMSubnormalExponent, shift);
- int32x4_t trailingSub = vshlq_u32(trailing, shift);
- trailingSub = vandq_s32(trailingSub, g_XMQNaNTest);
- int32x4_t e = vbslq_f32( isExponentZero, exponentSub, exponentNor );
- int32x4_t t = vbslq_f32( isExponentZero, trailingSub, trailingNor );
- // Compute the approximation.
- int32x4_t tmp = vorrq_s32(g_XMOne, t);
- float32x4_t y = vsubq_f32(tmp, g_XMOne);
- float32x4_t log2 = vmlaq_f32( g_XMLogEst6, g_XMLogEst7, y );
- log2 = vmlaq_f32( g_XMLogEst5, log2, y );
- log2 = vmlaq_f32( g_XMLogEst4, log2, y );
- log2 = vmlaq_f32( g_XMLogEst3, log2, y );
- log2 = vmlaq_f32( g_XMLogEst2, log2, y );
- log2 = vmlaq_f32( g_XMLogEst1, log2, y );
- log2 = vmlaq_f32( g_XMLogEst0, log2, y );
- log2 = vmlaq_f32( vcvtq_f32_s32(e), log2, y );
- // if (x is NaN) -> QNaN
- // else if (V is positive)
- // if (V is infinite) -> +inf
- // else -> log2(V)
- // else
- // if (V is zero) -> -inf
- // else -> -QNaN
- int32x4_t isInfinite = vandq_s32((V), g_XMAbsMask);
- isInfinite = vceqq_s32(isInfinite, g_XMInfinity);
- int32x4_t isGreaterZero = vcgtq_s32((V), g_XMZero);
- int32x4_t isNotFinite = vcgtq_s32((V), g_XMInfinity);
- int32x4_t isPositive = vbicq_s32( isGreaterZero,isNotFinite);
- int32x4_t isZero = vandq_s32((V), g_XMAbsMask);
- isZero = vceqq_s32(isZero, g_XMZero);
- int32x4_t t0 = vandq_s32((V), g_XMQNaNTest);
- int32x4_t t1 = vandq_s32((V), g_XMInfinity);
- t0 = vceqq_s32(t0, g_XMZero);
- t1 = vceqq_s32(t1, g_XMInfinity);
- int32x4_t isNaN = vbicq_s32( t1,t0);
- float32x4_t result = vbslq_f32( isInfinite, g_XMInfinity, log2 );
- tmp = vbslq_f32( isZero, g_XMNegInfinity, g_XMNegQNaN );
- result = vbslq_f32(isPositive, result, tmp);
- result = vbslq_f32(isNaN, g_XMQNaN, result );
- return result;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i rawBiased = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity);
- __m128i trailing = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest);
- __m128i isExponentZero = _mm_cmpeq_epi32(g_XMZero, rawBiased);
- // Compute exponent and significand for normals.
- __m128i biased = _mm_srli_epi32(rawBiased, 23);
- __m128i exponentNor = _mm_sub_epi32(biased, g_XMExponentBias);
- __m128i trailingNor = trailing;
- // Compute exponent and significand for subnormals.
- __m128i leading = Internal::GetLeadingBit(trailing);
- __m128i shift = _mm_sub_epi32(g_XMNumTrailing, leading);
- __m128i exponentSub = _mm_sub_epi32(g_XMSubnormalExponent, shift);
- __m128i trailingSub = Internal::multi_sll_epi32(trailing, shift);
- trailingSub = _mm_and_si128(trailingSub, g_XMQNaNTest);
- __m128i select0 = _mm_and_si128(isExponentZero, exponentSub);
- __m128i select1 = _mm_andnot_si128(isExponentZero, exponentNor);
- __m128i e = _mm_or_si128(select0, select1);
- select0 = _mm_and_si128(isExponentZero, trailingSub);
- select1 = _mm_andnot_si128(isExponentZero, trailingNor);
- __m128i t = _mm_or_si128(select0, select1);
- // Compute the approximation.
- __m128i tmp = _mm_or_si128(g_XMOne, t);
- __m128 y = _mm_sub_ps(_mm_castsi128_ps(tmp), g_XMOne);
- __m128 log2 = _mm_mul_ps(g_XMLogEst7, y);
- log2 = _mm_add_ps(g_XMLogEst6, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst5, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst4, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst3, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst2, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst1, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst0, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(log2, _mm_cvtepi32_ps(e));
- // if (x is NaN) -> QNaN
- // else if (V is positive)
- // if (V is infinite) -> +inf
- // else -> log2(V)
- // else
- // if (V is zero) -> -inf
- // else -> -QNaN
- __m128i isInfinite = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
- isInfinite = _mm_cmpeq_epi32(isInfinite, g_XMInfinity);
- __m128i isGreaterZero = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMZero);
- __m128i isNotFinite = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMInfinity);
- __m128i isPositive = _mm_andnot_si128(isNotFinite, isGreaterZero);
- __m128i isZero = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
- isZero = _mm_cmpeq_epi32(isZero, g_XMZero);
- __m128i t0 = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest);
- __m128i t1 = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity);
- t0 = _mm_cmpeq_epi32(t0, g_XMZero);
- t1 = _mm_cmpeq_epi32(t1, g_XMInfinity);
- __m128i isNaN = _mm_andnot_si128(t0, t1);
- select0 = _mm_and_si128(isInfinite, g_XMInfinity);
- select1 = _mm_andnot_si128(isInfinite, _mm_castps_si128(log2));
- __m128i result = _mm_or_si128(select0, select1);
- select0 = _mm_and_si128(isZero, g_XMNegInfinity);
- select1 = _mm_andnot_si128(isZero, g_XMNegQNaN);
- tmp = _mm_or_si128(select0, select1);
- select0 = _mm_and_si128(isPositive, result);
- select1 = _mm_andnot_si128(isPositive, tmp);
- result = _mm_or_si128(select0, select1);
- select0 = _mm_and_si128(isNaN, g_XMQNaN);
- select1 = _mm_andnot_si128(isNaN, result);
- result = _mm_or_si128(select0, select1);
- return _mm_castsi128_ps(result);
- #endif
- }
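- // Note on XMVectorLog2 above (illustrative): a normal float is 2^e * (1 + t / 2^23), so
- // log2(V) = e + log2(1 + y) with y = t / 2^23 in [0,1); the LogEst polynomial estimates
- // log2(1 + y) and the unbiased exponent e is added at the end. Subnormals are first
- // renormalized using GetLeadingBit. E.g. XMVectorLog2(XMVectorSet(8.f, 1.f, 0.5f, 16.f))
- // yields approximately (3, 0, -1, 4).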
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorLogE
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- logf(V.vector4_f32[0]),
- logf(V.vector4_f32[1]),
- logf(V.vector4_f32[2]),
- logf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- int32x4_t rawBiased = vandq_s32(V, g_XMInfinity);
- int32x4_t trailing = vandq_s32(V, g_XMQNaNTest);
- int32x4_t isExponentZero = vceqq_s32(g_XMZero, rawBiased);
- // Compute exponent and significand for normals.
- int32x4_t biased = vshrq_n_u32(rawBiased, 23);
- int32x4_t exponentNor = vsubq_s32(biased, g_XMExponentBias);
- int32x4_t trailingNor = trailing;
- // Compute exponent and significand for subnormals.
- int32x4_t leading = Internal::GetLeadingBit(trailing);
- int32x4_t shift = vsubq_s32(g_XMNumTrailing, leading);
- int32x4_t exponentSub = vsubq_s32(g_XMSubnormalExponent, shift);
- int32x4_t trailingSub = vshlq_u32(trailing, shift);
- trailingSub = vandq_s32(trailingSub, g_XMQNaNTest);
- int32x4_t e = vbslq_f32( isExponentZero, exponentSub, exponentNor );
- int32x4_t t = vbslq_f32( isExponentZero, trailingSub, trailingNor );
- // Compute the approximation.
- int32x4_t tmp = vorrq_s32(g_XMOne, t);
- float32x4_t y = vsubq_f32(tmp, g_XMOne);
- float32x4_t log2 = vmlaq_f32( g_XMLogEst6, g_XMLogEst7, y );
- log2 = vmlaq_f32( g_XMLogEst5, log2, y );
- log2 = vmlaq_f32( g_XMLogEst4, log2, y );
- log2 = vmlaq_f32( g_XMLogEst3, log2, y );
- log2 = vmlaq_f32( g_XMLogEst2, log2, y );
- log2 = vmlaq_f32( g_XMLogEst1, log2, y );
- log2 = vmlaq_f32( g_XMLogEst0, log2, y );
- log2 = vmlaq_f32( vcvtq_f32_s32(e), log2, y );
- log2 = vmulq_f32(g_XMInvLgE, log2);
- // if (x is NaN) -> QNaN
- // else if (V is positive)
- // if (V is infinite) -> +inf
- // else -> logE(V)
- // else
- // if (V is zero) -> -inf
- // else -> -QNaN
- int32x4_t isInfinite = vandq_s32((V), g_XMAbsMask);
- isInfinite = vceqq_s32(isInfinite, g_XMInfinity);
- int32x4_t isGreaterZero = vcgtq_s32((V), g_XMZero);
- int32x4_t isNotFinite = vcgtq_s32((V), g_XMInfinity);
- int32x4_t isPositive = vbicq_s32( isGreaterZero,isNotFinite);
- int32x4_t isZero = vandq_s32((V), g_XMAbsMask);
- isZero = vceqq_s32(isZero, g_XMZero);
- int32x4_t t0 = vandq_s32((V), g_XMQNaNTest);
- int32x4_t t1 = vandq_s32((V), g_XMInfinity);
- t0 = vceqq_s32(t0, g_XMZero);
- t1 = vceqq_s32(t1, g_XMInfinity);
- int32x4_t isNaN = vbicq_s32( t1,t0);
- float32x4_t result = vbslq_f32( isInfinite, g_XMInfinity, log2 );
- tmp = vbslq_f32( isZero, g_XMNegInfinity, g_XMNegQNaN );
- result = vbslq_f32(isPositive, result, tmp);
- result = vbslq_f32(isNaN, g_XMQNaN, result );
- return result;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i rawBiased = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity);
- __m128i trailing = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest);
- __m128i isExponentZero = _mm_cmpeq_epi32(g_XMZero, rawBiased);
- // Compute exponent and significand for normals.
- __m128i biased = _mm_srli_epi32(rawBiased, 23);
- __m128i exponentNor = _mm_sub_epi32(biased, g_XMExponentBias);
- __m128i trailingNor = trailing;
- // Compute exponent and significand for subnormals.
- __m128i leading = Internal::GetLeadingBit(trailing);
- __m128i shift = _mm_sub_epi32(g_XMNumTrailing, leading);
- __m128i exponentSub = _mm_sub_epi32(g_XMSubnormalExponent, shift);
- __m128i trailingSub = Internal::multi_sll_epi32(trailing, shift);
- trailingSub = _mm_and_si128(trailingSub, g_XMQNaNTest);
- __m128i select0 = _mm_and_si128(isExponentZero, exponentSub);
- __m128i select1 = _mm_andnot_si128(isExponentZero, exponentNor);
- __m128i e = _mm_or_si128(select0, select1);
- select0 = _mm_and_si128(isExponentZero, trailingSub);
- select1 = _mm_andnot_si128(isExponentZero, trailingNor);
- __m128i t = _mm_or_si128(select0, select1);
- // Compute the approximation.
- __m128i tmp = _mm_or_si128(g_XMOne, t);
- __m128 y = _mm_sub_ps(_mm_castsi128_ps(tmp), g_XMOne);
- __m128 log2 = _mm_mul_ps(g_XMLogEst7, y);
- log2 = _mm_add_ps(g_XMLogEst6, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst5, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst4, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst3, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst2, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst1, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(g_XMLogEst0, log2);
- log2 = _mm_mul_ps(log2, y);
- log2 = _mm_add_ps(log2, _mm_cvtepi32_ps(e));
- log2 = _mm_mul_ps(g_XMInvLgE, log2);
- // if (x is NaN) -> QNaN
- // else if (V is positive)
- // if (V is infinite) -> +inf
- // else -> logE(V)
- // else
- // if (V is zero) -> -inf
- // else -> -QNaN
- __m128i isInfinite = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
- isInfinite = _mm_cmpeq_epi32(isInfinite, g_XMInfinity);
- __m128i isGreaterZero = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMZero);
- __m128i isNotFinite = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMInfinity);
- __m128i isPositive = _mm_andnot_si128(isNotFinite, isGreaterZero);
- __m128i isZero = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
- isZero = _mm_cmpeq_epi32(isZero, g_XMZero);
- __m128i t0 = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest);
- __m128i t1 = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity);
- t0 = _mm_cmpeq_epi32(t0, g_XMZero);
- t1 = _mm_cmpeq_epi32(t1, g_XMInfinity);
- __m128i isNaN = _mm_andnot_si128(t0, t1);
- select0 = _mm_and_si128(isInfinite, g_XMInfinity);
- select1 = _mm_andnot_si128(isInfinite, _mm_castps_si128(log2));
- __m128i result = _mm_or_si128(select0, select1);
- select0 = _mm_and_si128(isZero, g_XMNegInfinity);
- select1 = _mm_andnot_si128(isZero, g_XMNegQNaN);
- tmp = _mm_or_si128(select0, select1);
- select0 = _mm_and_si128(isPositive, result);
- select1 = _mm_andnot_si128(isPositive, tmp);
- result = _mm_or_si128(select0, select1);
- select0 = _mm_and_si128(isNaN, g_XMQNaN);
- select1 = _mm_andnot_si128(isNaN, result);
- result = _mm_or_si128(select0, select1);
- return _mm_castsi128_ps(result);
- #endif
- }
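- // Note on XMVectorLogE above (illustrative): it reuses the base-2 evaluation and converts
- // at the end via ln(V) = log2(V) / log2(e); that conversion is the final multiply by
- // g_XMInvLgE (~0.6931472, i.e. ln 2).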
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorLog
- (
- FXMVECTOR V
- )
- {
- return XMVectorLog2(V);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorPow
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- powf(V1.vector4_f32[0], V2.vector4_f32[0]),
- powf(V1.vector4_f32[1], V2.vector4_f32[1]),
- powf(V1.vector4_f32[2], V2.vector4_f32[2]),
- powf(V1.vector4_f32[3], V2.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTORF32 vResult = { { {
- powf(vgetq_lane_f32(V1, 0), vgetq_lane_f32(V2, 0)),
- powf(vgetq_lane_f32(V1, 1), vgetq_lane_f32(V2, 1)),
- powf(vgetq_lane_f32(V1, 2), vgetq_lane_f32(V2, 2)),
- powf(vgetq_lane_f32(V1, 3), vgetq_lane_f32(V2, 3))
- } } };
- return vResult.v;
- #elif defined(_XM_SSE_INTRINSICS_)
- __declspec(align(16)) float a[4];
- __declspec(align(16)) float b[4];
- _mm_store_ps( a, V1 );
- _mm_store_ps( b, V2 );
- XMVECTOR vResult = _mm_setr_ps(
- powf(a[0],b[0]),
- powf(a[1],b[1]),
- powf(a[2],b[2]),
- powf(a[3],b[3]));
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorAbs
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult = { { {
- fabsf(V.vector4_f32[0]),
- fabsf(V.vector4_f32[1]),
- fabsf(V.vector4_f32[2]),
- fabsf(V.vector4_f32[3])
- } } };
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- return vabsq_f32( V );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = _mm_setzero_ps();
- vResult = _mm_sub_ps(vResult,V);
- vResult = _mm_max_ps(vResult,V);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorMod
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- // V1 % V2 = V1 - V2 * truncate(V1 / V2)
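- // The quotient is truncated toward zero, so the result keeps the sign of V1 (matching
- // fmodf). Illustrative example: V1 = -7, V2 = 3 gives truncate(-7/3) = -2 and
- // -7 - 3*(-2) = -1.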
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Quotient = XMVectorDivide(V1, V2);
- Quotient = XMVectorTruncate(Quotient);
- XMVECTOR Result = XMVectorNegativeMultiplySubtract(V2, Quotient, V1);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR vResult = XMVectorDivide(V1, V2);
- vResult = XMVectorTruncate(vResult);
- return vmlsq_f32( V1, vResult, V2 );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = _mm_div_ps(V1, V2);
- vResult = XMVectorTruncate(vResult);
- vResult = _mm_mul_ps(vResult,V2);
- vResult = _mm_sub_ps(V1,vResult);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorModAngles
- (
- FXMVECTOR Angles
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR V;
- XMVECTOR Result;
- // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI
- V = XMVectorMultiply(Angles, g_XMReciprocalTwoPi.v);
- V = XMVectorRound(V);
- Result = XMVectorNegativeMultiplySubtract(g_XMTwoPi.v, V, Angles);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI
- XMVECTOR vResult = vmulq_f32(Angles,g_XMReciprocalTwoPi);
- // Use the inline function due to complexity for rounding
- vResult = XMVectorRound(vResult);
- return vmlsq_f32( Angles, vResult, g_XMTwoPi );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI
- XMVECTOR vResult = _mm_mul_ps(Angles,g_XMReciprocalTwoPi);
- // Use the inline function due to complexity for rounding
- vResult = XMVectorRound(vResult);
- vResult = _mm_mul_ps(vResult,g_XMTwoPi);
- vResult = _mm_sub_ps(Angles,vResult);
- return vResult;
- #endif
- }
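- // Note on XMVectorModAngles above (illustrative): each lane computes
- // Angles - XM_2PI * round(Angles / XM_2PI), mapping the angle into [-XM_PI, XM_PI).
- // E.g. an input of 5 * XM_PIDIV2 rounds 1.25 to 1 and returns XM_PIDIV2.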
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSin
- (
- FXMVECTOR V
- )
- {
- // 11-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- sinf(V.vector4_f32[0]),
- sinf(V.vector4_f32[1]),
- sinf(V.vector4_f32[2]),
- sinf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Force the value within the bounds of pi
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with sin(y) = sin(x).
- uint32x4_t sign = vandq_u32(x, g_XMNegativeZero);
- uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- float32x4_t absx = vabsq_f32( x );
- float32x4_t rflx = vsubq_f32(c, x);
- uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi);
- x = vbslq_f32( comp, x, rflx );
- float32x4_t x2 = vmulq_f32(x, x);
- // Compute polynomial approximation
- const XMVECTOR SC1 = g_XMSinCoefficients1;
- const XMVECTOR SC0 = g_XMSinCoefficients0;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SC0), 1);
- XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(SC1), 0);
- vConstants = vdupq_lane_f32(vget_high_f32(SC0), 0);
- Result = vmlaq_f32(vConstants, Result, x2);
- vConstants = vdupq_lane_f32(vget_low_f32(SC0), 1);
- Result = vmlaq_f32(vConstants, Result, x2);
- vConstants = vdupq_lane_f32(vget_low_f32(SC0), 0);
- Result = vmlaq_f32(vConstants, Result, x2);
- Result = vmlaq_f32(g_XMOne, Result, x2);
- Result = vmulq_f32(Result, x);
- return Result;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Force the value within the bounds of pi
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with sin(y) = sin(x).
- __m128 sign = _mm_and_ps(x, g_XMNegativeZero);
- __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- __m128 absx = _mm_andnot_ps(sign, x); // |x|
- __m128 rflx = _mm_sub_ps(c, x);
- __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
- __m128 select0 = _mm_and_ps(comp, x);
- __m128 select1 = _mm_andnot_ps(comp, rflx);
- x = _mm_or_ps(select0, select1);
- __m128 x2 = _mm_mul_ps(x, x);
- // Compute polynomial approximation
- const XMVECTOR SC1 = g_XMSinCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( SC1, _MM_SHUFFLE(0, 0, 0, 0) );
- __m128 Result = _mm_mul_ps(vConstants, x2);
- const XMVECTOR SC0 = g_XMSinCoefficients0;
- vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(3, 3, 3, 3) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(0, 0, 0, 0) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- Result = _mm_add_ps(Result, g_XMOne);
- Result = _mm_mul_ps(Result, x);
- return Result;
- #endif
- }
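- // Note on XMVectorSin above (illustrative): after reduction to [-pi,pi], lanes with
- // |x| > pi/2 are reflected through +/-pi using sin(pi - x) = sin(x), so the 11-degree odd
- // polynomial x * (1 + x^2 * (...)) only needs to be accurate on [-pi/2, pi/2].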
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorCos
- (
- FXMVECTOR V
- )
- {
- // 10-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- cosf(V.vector4_f32[0]),
- cosf(V.vector4_f32[1]),
- cosf(V.vector4_f32[2]),
- cosf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Map V to x in [-pi,pi].
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
- uint32x4_t sign = vandq_u32(x, g_XMNegativeZero);
- uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- float32x4_t absx = vabsq_f32( x );
- float32x4_t rflx = vsubq_f32(c, x);
- uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi);
- x = vbslq_f32( comp, x, rflx );
- sign = vbslq_f32( comp, g_XMOne, g_XMNegativeOne );
- float32x4_t x2 = vmulq_f32(x, x);
- // Compute polynomial approximation
- const XMVECTOR CC1 = g_XMCosCoefficients1;
- const XMVECTOR CC0 = g_XMCosCoefficients0;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(CC0), 1);
- XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(CC1), 0 );
- vConstants = vdupq_lane_f32(vget_high_f32(CC0), 0);
- Result = vmlaq_f32(vConstants, Result, x2);
- vConstants = vdupq_lane_f32(vget_low_f32(CC0), 1);
- Result = vmlaq_f32(vConstants, Result, x2);
- vConstants = vdupq_lane_f32(vget_low_f32(CC0), 0);
- Result = vmlaq_f32(vConstants, Result, x2);
- Result = vmlaq_f32(g_XMOne, Result, x2);
- Result = vmulq_f32(Result, sign);
- return Result;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Map V to x in [-pi,pi].
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
- XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero);
- __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- __m128 absx = _mm_andnot_ps(sign, x); // |x|
- __m128 rflx = _mm_sub_ps(c, x);
- __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
- __m128 select0 = _mm_and_ps(comp, x);
- __m128 select1 = _mm_andnot_ps(comp, rflx);
- x = _mm_or_ps(select0, select1);
- select0 = _mm_and_ps(comp, g_XMOne);
- select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
- sign = _mm_or_ps(select0, select1);
- __m128 x2 = _mm_mul_ps(x, x);
- // Compute polynomial approximation
- const XMVECTOR CC1 = g_XMCosCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( CC1, _MM_SHUFFLE(0, 0, 0, 0) );
- __m128 Result = _mm_mul_ps(vConstants, x2);
- const XMVECTOR CC0 = g_XMCosCoefficients0;
- vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(3, 3, 3, 3) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(0, 0, 0, 0) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- Result = _mm_add_ps(Result, g_XMOne);
- Result = _mm_mul_ps(Result, sign);
- return Result;
- #endif
- }
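- // Note on XMVectorCos above (illustrative): it uses the same reflection as XMVectorSin,
- // but since cos(pi - x) = -cos(x) the reflected lanes also need a sign flip; 'sign' is +1
- // where no reflection happened and -1 where it did.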
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorSinCos
- (
- XMVECTOR* pSin,
- XMVECTOR* pCos,
- FXMVECTOR V
- )
- {
- assert(pSin != nullptr);
- assert(pCos != nullptr);
- // 11/10-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Sin = { { {
- sinf(V.vector4_f32[0]),
- sinf(V.vector4_f32[1]),
- sinf(V.vector4_f32[2]),
- sinf(V.vector4_f32[3])
- } } };
- XMVECTORF32 Cos = { { {
- cosf(V.vector4_f32[0]),
- cosf(V.vector4_f32[1]),
- cosf(V.vector4_f32[2]),
- cosf(V.vector4_f32[3])
- } } };
- *pSin = Sin.v;
- *pCos = Cos.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Force the value within the bounds of pi
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
- uint32x4_t sign = vandq_u32(x, g_XMNegativeZero);
- uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- float32x4_t absx = vabsq_f32( x );
- float32x4_t rflx = vsubq_f32(c, x);
- uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi);
- x = vbslq_f32( comp, x, rflx );
- sign = vbslq_f32( comp, g_XMOne, g_XMNegativeOne );
- float32x4_t x2 = vmulq_f32(x, x);
- // Compute polynomial approximation for sine
- const XMVECTOR SC1 = g_XMSinCoefficients1;
- const XMVECTOR SC0 = g_XMSinCoefficients0;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SC0), 1);
- XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(SC1), 0);
- vConstants = vdupq_lane_f32(vget_high_f32(SC0), 0);
- Result = vmlaq_f32(vConstants, Result, x2);
- vConstants = vdupq_lane_f32(vget_low_f32(SC0), 1);
- Result = vmlaq_f32(vConstants, Result, x2);
- vConstants = vdupq_lane_f32(vget_low_f32(SC0), 0);
- Result = vmlaq_f32(vConstants, Result, x2);
- Result = vmlaq_f32(g_XMOne, Result, x2);
- *pSin = vmulq_f32(Result, x);
- // Compute polynomial approximation for cosine
- const XMVECTOR CC1 = g_XMCosCoefficients1;
- const XMVECTOR CC0 = g_XMCosCoefficients0;
- vConstants = vdupq_lane_f32(vget_high_f32(CC0), 1);
- Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(CC1), 0);
- vConstants = vdupq_lane_f32(vget_high_f32(CC0), 0);
- Result = vmlaq_f32(vConstants, Result, x2);
- vConstants = vdupq_lane_f32(vget_low_f32(CC0), 1);
- Result = vmlaq_f32(vConstants, Result, x2);
- vConstants = vdupq_lane_f32(vget_low_f32(CC0), 0);
- Result = vmlaq_f32(vConstants, Result, x2);
- Result = vmlaq_f32(g_XMOne, Result, x2);
- *pCos = vmulq_f32(Result, sign);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Force the value within the bounds of pi
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with sin(y) = sin(x), cos(y) = sign*cos(x).
- XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero);
- __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- __m128 absx = _mm_andnot_ps(sign, x); // |x|
- __m128 rflx = _mm_sub_ps(c, x);
- __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
- __m128 select0 = _mm_and_ps(comp, x);
- __m128 select1 = _mm_andnot_ps(comp, rflx);
- x = _mm_or_ps(select0, select1);
- select0 = _mm_and_ps(comp, g_XMOne);
- select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
- sign = _mm_or_ps(select0, select1);
- __m128 x2 = _mm_mul_ps(x, x);
- // Compute polynomial approximation of sine
- const XMVECTOR SC1 = g_XMSinCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( SC1, _MM_SHUFFLE(0, 0, 0, 0) );
- __m128 Result = _mm_mul_ps(vConstants, x2);
- const XMVECTOR SC0 = g_XMSinCoefficients0;
- vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(3, 3, 3, 3) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(0, 0, 0, 0) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- Result = _mm_add_ps(Result, g_XMOne);
- Result = _mm_mul_ps(Result, x);
- *pSin = Result;
- // Compute polynomial approximation of cosine
- const XMVECTOR CC1 = g_XMCosCoefficients1;
- vConstants = XM_PERMUTE_PS( CC1, _MM_SHUFFLE(0, 0, 0, 0) );
- Result = _mm_mul_ps(vConstants, x2);
- const XMVECTOR CC0 = g_XMCosCoefficients0;
- vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(3, 3, 3, 3) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(0, 0, 0, 0) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- Result = _mm_add_ps(Result, g_XMOne);
- Result = _mm_mul_ps(Result, sign);
- *pCos = Result;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorTan
- (
- FXMVECTOR V
- )
- {
- // Cody and Waite algorithm to compute tangent.
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- tanf(V.vector4_f32[0]),
- tanf(V.vector4_f32[1]),
- tanf(V.vector4_f32[2]),
- tanf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_SSE_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_)
- static const XMVECTORF32 TanCoefficients0 = { { { 1.0f, -4.667168334e-1f, 2.566383229e-2f, -3.118153191e-4f } } };
- static const XMVECTORF32 TanCoefficients1 = { { { 4.981943399e-7f, -1.333835001e-1f, 3.424887824e-3f, -1.786170734e-5f } } };
- static const XMVECTORF32 TanConstants = { { { 1.570796371f, 6.077100628e-11f, 0.000244140625f, 0.63661977228f /*2 / Pi*/ } } };
- static const XMVECTORU32 Mask = { { { 0x1, 0x1, 0x1, 0x1 } } };
- XMVECTOR TwoDivPi = XMVectorSplatW(TanConstants.v);
- XMVECTOR Zero = XMVectorZero();
- XMVECTOR C0 = XMVectorSplatX(TanConstants.v);
- XMVECTOR C1 = XMVectorSplatY(TanConstants.v);
- XMVECTOR Epsilon = XMVectorSplatZ(TanConstants.v);
- XMVECTOR VA = XMVectorMultiply(V, TwoDivPi);
- VA = XMVectorRound(VA);
- XMVECTOR VC = XMVectorNegativeMultiplySubtract(VA, C0, V);
- XMVECTOR VB = XMVectorAbs(VA);
- VC = XMVectorNegativeMultiplySubtract(VA, C1, VC);
- #if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
- VB = vcvtq_u32_f32( VB );
- #elif defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
- reinterpret_cast<__m128i *>(&VB)[0] = _mm_cvttps_epi32(VB);
- #else
- for (size_t i = 0; i < 4; i++)
- {
- VB.vector4_u32[i] = (uint32_t)VB.vector4_f32[i];
- }
- #endif
- XMVECTOR VC2 = XMVectorMultiply(VC, VC);
- XMVECTOR T7 = XMVectorSplatW(TanCoefficients1.v);
- XMVECTOR T6 = XMVectorSplatZ(TanCoefficients1.v);
- XMVECTOR T4 = XMVectorSplatX(TanCoefficients1.v);
- XMVECTOR T3 = XMVectorSplatW(TanCoefficients0.v);
- XMVECTOR T5 = XMVectorSplatY(TanCoefficients1.v);
- XMVECTOR T2 = XMVectorSplatZ(TanCoefficients0.v);
- XMVECTOR T1 = XMVectorSplatY(TanCoefficients0.v);
- XMVECTOR T0 = XMVectorSplatX(TanCoefficients0.v);
- XMVECTOR VBIsEven = XMVectorAndInt(VB, Mask.v);
- VBIsEven = XMVectorEqualInt(VBIsEven, Zero);
- XMVECTOR N = XMVectorMultiplyAdd(VC2, T7, T6);
- XMVECTOR D = XMVectorMultiplyAdd(VC2, T4, T3);
- N = XMVectorMultiplyAdd(VC2, N, T5);
- D = XMVectorMultiplyAdd(VC2, D, T2);
- N = XMVectorMultiply(VC2, N);
- D = XMVectorMultiplyAdd(VC2, D, T1);
- N = XMVectorMultiplyAdd(VC, N, VC);
- XMVECTOR VCNearZero = XMVectorInBounds(VC, Epsilon);
- D = XMVectorMultiplyAdd(VC2, D, T0);
- N = XMVectorSelect(N, VC, VCNearZero);
- D = XMVectorSelect(D, g_XMOne.v, VCNearZero);
- XMVECTOR R0 = XMVectorNegate(N);
- XMVECTOR R1 = XMVectorDivide(N,D);
- R0 = XMVectorDivide(D,R0);
- XMVECTOR VIsZero = XMVectorEqual(V, Zero);
- XMVECTOR Result = XMVectorSelect(R0, R1, VBIsEven);
- Result = XMVectorSelect(Result, Zero, VIsZero);
- return Result;
- #endif
- }
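- // Note on XMVectorTan above (illustrative): the Cody-Waite step removes round(V * 2/pi)
- // multiples of pi/2 using the split constant C0 + C1 ~= pi/2 so the remainder VC stays
- // accurate, then tan(VC) is evaluated as a rational N/D. When the removed multiple (VB) is
- // odd, tan(x + pi/2) = -1/tan(x), which is why that case returns D / (-N) instead of N / D.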
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSinH
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- sinhf(V.vector4_f32[0]),
- sinhf(V.vector4_f32[1]),
- sinhf(V.vector4_f32[2]),
- sinhf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f)
- XMVECTOR V1 = vmlaq_f32( g_XMNegativeOne.v, V, Scale.v );
- XMVECTOR V2 = vmlsq_f32( g_XMNegativeOne.v, V, Scale.v );
- XMVECTOR E1 = XMVectorExp(V1);
- XMVECTOR E2 = XMVectorExp(V2);
- return vsubq_f32(E1, E2);
- #elif defined(_XM_SSE_INTRINSICS_)
- static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f)
- XMVECTOR V1 = _mm_mul_ps(V, Scale);
- V1 = _mm_add_ps(V1,g_XMNegativeOne);
- XMVECTOR V2 = _mm_mul_ps(V, Scale);
- V2 = _mm_sub_ps(g_XMNegativeOne,V2);
- XMVECTOR E1 = XMVectorExp(V1);
- XMVECTOR E2 = XMVectorExp(V2);
- return _mm_sub_ps(E1, E2);
- #endif
- }
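- // Note on XMVectorSinH above (illustrative): sinh(x) = (e^x - e^-x) / 2. With XMVectorExp
- // being the base-2 exponential (matching XMVectorLog above), this is evaluated as
- // 2^(x/ln2 - 1) - 2^(-x/ln2 - 1); Scale is 1/ln2 and the -1 in each exponent supplies the
- // division by 2. XMVectorCosH below uses the same trick with a final add instead of a subtract.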
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorCosH
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- coshf(V.vector4_f32[0]),
- coshf(V.vector4_f32[1]),
- coshf(V.vector4_f32[2]),
- coshf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f)
- XMVECTOR V1 = vmlaq_f32(g_XMNegativeOne.v, V, Scale.v);
- XMVECTOR V2 = vmlsq_f32(g_XMNegativeOne.v, V, Scale.v);
- XMVECTOR E1 = XMVectorExp(V1);
- XMVECTOR E2 = XMVectorExp(V2);
- return vaddq_f32(E1, E2);
- #elif defined(_XM_SSE_INTRINSICS_)
- static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f)
- XMVECTOR V1 = _mm_mul_ps(V,Scale.v);
- V1 = _mm_add_ps(V1,g_XMNegativeOne.v);
- XMVECTOR V2 = _mm_mul_ps(V, Scale.v);
- V2 = _mm_sub_ps(g_XMNegativeOne.v,V2);
- XMVECTOR E1 = XMVectorExp(V1);
- XMVECTOR E2 = XMVectorExp(V2);
- return _mm_add_ps(E1, E2);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorTanH
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- tanhf(V.vector4_f32[0]),
- tanhf(V.vector4_f32[1]),
- tanhf(V.vector4_f32[2]),
- tanhf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- static const XMVECTORF32 Scale = { { { 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f } } }; // 2.0f / ln(2.0f)
- XMVECTOR E = vmulq_f32(V, Scale.v);
- E = XMVectorExp(E);
- E = vmlaq_f32( g_XMOneHalf.v, E, g_XMOneHalf.v );
- E = XMVectorReciprocal(E);
- return vsubq_f32(g_XMOne.v, E);
- #elif defined(_XM_SSE_INTRINSICS_)
- static const XMVECTORF32 Scale = { { { 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f } } }; // 2.0f / ln(2.0f)
- XMVECTOR E = _mm_mul_ps(V, Scale.v);
- E = XMVectorExp(E);
- E = _mm_mul_ps(E,g_XMOneHalf.v);
- E = _mm_add_ps(E,g_XMOneHalf.v);
- E = _mm_div_ps(g_XMOne.v,E);
- return _mm_sub_ps(g_XMOne.v,E);
- #endif
- }
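- // Note on XMVectorTanH above (illustrative): it uses tanh(x) = 1 - 2 / (e^(2x) + 1). The
- // Scale constant 2/ln2 turns the base-2 XMVectorExp into e^(2x), the two g_XMOneHalf
- // factors form E = (e^(2x) + 1) / 2, and the result is 1 - 1/E.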
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorASin
- (
- FXMVECTOR V
- )
- {
- // 7-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- asinf(V.vector4_f32[0]),
- asinf(V.vector4_f32[1]),
- asinf(V.vector4_f32[2]),
- asinf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero);
- float32x4_t x = vabsq_f32(V);
- // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
- float32x4_t oneMValue = vsubq_f32(g_XMOne, x);
- float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue);
- float32x4_t root = XMVectorSqrt(clampOneMValue);
- // Compute polynomial approximation
- const XMVECTOR AC1 = g_XMArcCoefficients1;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AC1), 0);
- XMVECTOR t0 = vmlaq_lane_f32( vConstants, x, vget_high_f32(AC1), 1 );
- vConstants = vdupq_lane_f32(vget_low_f32(AC1), 1);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_low_f32(AC1), 0);
- t0 = vmlaq_f32( vConstants, t0, x );
- const XMVECTOR AC0 = g_XMArcCoefficients0;
- vConstants = vdupq_lane_f32(vget_high_f32(AC0), 1);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_high_f32(AC0), 0);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_low_f32(AC0), 1);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_low_f32(AC0), 0);
- t0 = vmlaq_f32( vConstants, t0, x );
- t0 = vmulq_f32(t0, root);
- float32x4_t t1 = vsubq_f32(g_XMPi, t0);
- t0 = vbslq_f32( nonnegative, t0, t1 );
- t0 = vsubq_f32(g_XMHalfPi, t0);
- return t0;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero);
- __m128 mvalue = _mm_sub_ps(g_XMZero, V);
- __m128 x = _mm_max_ps(V, mvalue); // |V|
- // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
- __m128 oneMValue = _mm_sub_ps(g_XMOne, x);
- __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue);
- __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|)
- // Compute polynomial approximation
- const XMVECTOR AC1 = g_XMArcCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( AC1, _MM_SHUFFLE(3, 3, 3, 3) );
- __m128 t0 = _mm_mul_ps(vConstants, x);
- vConstants = XM_PERMUTE_PS( AC1, _MM_SHUFFLE(2, 2, 2, 2) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC1, _MM_SHUFFLE(1, 1, 1, 1) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC1, _MM_SHUFFLE(0, 0, 0, 0) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- const XMVECTOR AC0 = g_XMArcCoefficients0;
- vConstants = XM_PERMUTE_PS( AC0, _MM_SHUFFLE(3, 3, 3, 3) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC0,_MM_SHUFFLE(2, 2, 2, 2) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC0, _MM_SHUFFLE(1, 1, 1, 1) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC0, _MM_SHUFFLE(0, 0, 0, 0) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, root);
- __m128 t1 = _mm_sub_ps(g_XMPi, t0);
- t0 = _mm_and_ps(nonnegative, t0);
- t1 = _mm_andnot_ps(nonnegative, t1);
- t0 = _mm_or_ps(t0, t1);
- t0 = _mm_sub_ps(g_XMHalfPi, t0);
- return t0;
- #endif
- }
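- // Note on XMVectorASin above (illustrative): the polynomial times sqrt(1 - |V|)
- // approximates acos(|V|); for negative inputs acos(V) = pi - acos(|V|), and the final
- // subtraction converts via asin(V) = pi/2 - acos(V). XMVectorACos below is the same
- // computation without that last step.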
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorACos
- (
- FXMVECTOR V
- )
- {
- // 7-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- acosf(V.vector4_f32[0]),
- acosf(V.vector4_f32[1]),
- acosf(V.vector4_f32[2]),
- acosf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero);
- float32x4_t x = vabsq_f32(V);
- // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
- float32x4_t oneMValue = vsubq_f32(g_XMOne, x);
- float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue);
- float32x4_t root = XMVectorSqrt(clampOneMValue);
- // Compute polynomial approximation
- const XMVECTOR AC1 = g_XMArcCoefficients1;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AC1), 0);
- XMVECTOR t0 = vmlaq_lane_f32( vConstants, x, vget_high_f32(AC1), 1 );
- vConstants = vdupq_lane_f32(vget_low_f32(AC1), 1);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_low_f32(AC1), 0);
- t0 = vmlaq_f32( vConstants, t0, x );
- const XMVECTOR AC0 = g_XMArcCoefficients0;
- vConstants = vdupq_lane_f32(vget_high_f32(AC0), 1);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_high_f32(AC0), 0);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_low_f32(AC0), 1);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_low_f32(AC0), 0);
- t0 = vmlaq_f32( vConstants, t0, x );
- t0 = vmulq_f32(t0, root);
- float32x4_t t1 = vsubq_f32(g_XMPi, t0);
- t0 = vbslq_f32( nonnegative, t0, t1 );
- return t0;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero);
- __m128 mvalue = _mm_sub_ps(g_XMZero, V);
- __m128 x = _mm_max_ps(V, mvalue); // |V|
- // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
- __m128 oneMValue = _mm_sub_ps(g_XMOne, x);
- __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue);
- __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|)
- // Compute polynomial approximation
- const XMVECTOR AC1 = g_XMArcCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( AC1, _MM_SHUFFLE(3, 3, 3, 3) );
- __m128 t0 = _mm_mul_ps(vConstants, x);
- vConstants = XM_PERMUTE_PS( AC1, _MM_SHUFFLE(2, 2, 2, 2) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC1, _MM_SHUFFLE(1, 1, 1, 1) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC1, _MM_SHUFFLE(0, 0, 0, 0) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- const XMVECTOR AC0 = g_XMArcCoefficients0;
- vConstants = XM_PERMUTE_PS( AC0, _MM_SHUFFLE(3, 3, 3, 3) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC0, _MM_SHUFFLE(2, 2, 2, 2) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC0, _MM_SHUFFLE(1, 1, 1, 1) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AC0, _MM_SHUFFLE(0, 0, 0, 0) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, root);
- __m128 t1 = _mm_sub_ps(g_XMPi, t0);
- t0 = _mm_and_ps(nonnegative, t0);
- t1 = _mm_andnot_ps(nonnegative, t1);
- t0 = _mm_or_ps(t0, t1);
- return t0;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorATan
- (
- FXMVECTOR V
- )
- {
- // 17-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- atanf(V.vector4_f32[0]),
- atanf(V.vector4_f32[1]),
- atanf(V.vector4_f32[2]),
- atanf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x4_t absV = vabsq_f32(V);
- float32x4_t invV = XMVectorReciprocal(V);
- uint32x4_t comp = vcgtq_f32(V, g_XMOne);
- uint32x4_t sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne);
- comp = vcleq_f32(absV, g_XMOne);
- sign = vbslq_f32(comp, g_XMZero, sign);
- uint32x4_t x = vbslq_f32(comp, V, invV);
- float32x4_t x2 = vmulq_f32(x, x);
- // Compute polynomial approximation
- const XMVECTOR TC1 = g_XMATanCoefficients1;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(TC1), 0);
- XMVECTOR Result = vmlaq_lane_f32( vConstants, x2, vget_high_f32(TC1), 1 );
- vConstants = vdupq_lane_f32(vget_low_f32(TC1), 1);
- Result = vmlaq_f32( vConstants, Result, x2 );
- vConstants = vdupq_lane_f32(vget_low_f32(TC1), 0);
- Result = vmlaq_f32( vConstants, Result, x2 );
- const XMVECTOR TC0 = g_XMATanCoefficients0;
- vConstants = vdupq_lane_f32(vget_high_f32(TC0), 1);
- Result = vmlaq_f32( vConstants, Result, x2 );
- vConstants = vdupq_lane_f32(vget_high_f32(TC0), 0);
- Result = vmlaq_f32( vConstants, Result, x2 );
- vConstants = vdupq_lane_f32(vget_low_f32(TC0), 1);
- Result = vmlaq_f32( vConstants, Result, x2 );
- vConstants = vdupq_lane_f32(vget_low_f32(TC0), 0);
- Result = vmlaq_f32( vConstants, Result, x2 );
- Result = vmlaq_f32( g_XMOne, Result, x2 );
- Result = vmulq_f32( Result, x );
- float32x4_t result1 = vmulq_f32(sign, g_XMHalfPi);
- result1 = vsubq_f32(result1, Result);
- comp = vceqq_f32(sign, g_XMZero);
- Result = vbslq_f32( comp, Result, result1 );
- return Result;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128 absV = XMVectorAbs(V);
- __m128 invV = _mm_div_ps(g_XMOne, V);
- __m128 comp = _mm_cmpgt_ps(V, g_XMOne);
- __m128 select0 = _mm_and_ps(comp, g_XMOne);
- __m128 select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
- __m128 sign = _mm_or_ps(select0, select1);
- comp = _mm_cmple_ps(absV, g_XMOne);
- select0 = _mm_and_ps(comp, g_XMZero);
- select1 = _mm_andnot_ps(comp, sign);
- sign = _mm_or_ps(select0, select1);
- select0 = _mm_and_ps(comp, V);
- select1 = _mm_andnot_ps(comp, invV);
- __m128 x = _mm_or_ps(select0, select1);
- __m128 x2 = _mm_mul_ps(x, x);
- // Compute polynomial approximation
- const XMVECTOR TC1 = g_XMATanCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( TC1, _MM_SHUFFLE(3, 3, 3, 3) );
- __m128 Result = _mm_mul_ps(vConstants, x2);
- vConstants = XM_PERMUTE_PS( TC1, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( TC1, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( TC1, _MM_SHUFFLE(0, 0, 0, 0) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- const XMVECTOR TC0 = g_XMATanCoefficients0;
- vConstants = XM_PERMUTE_PS( TC0, _MM_SHUFFLE(3, 3, 3, 3) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( TC0, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( TC0, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( TC0, _MM_SHUFFLE(0, 0, 0, 0) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- Result = _mm_add_ps(Result, g_XMOne);
- Result = _mm_mul_ps(Result, x);
- __m128 result1 = _mm_mul_ps(sign, g_XMHalfPi);
- result1 = _mm_sub_ps(result1, Result);
- comp = _mm_cmpeq_ps(sign, g_XMZero);
- select0 = _mm_and_ps(comp, Result);
- select1 = _mm_andnot_ps(comp, result1);
- Result = _mm_or_ps(select0, select1);
- return Result;
- #endif
- }
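- // Note on XMVectorATan above (illustrative): for |V| <= 1 the polynomial is evaluated at
- // x = V directly; otherwise x = 1/V and the identity atan(V) = sign(V)*pi/2 - atan(1/V) is
- // applied, which is the 'result1' path selected wherever 'sign' is nonzero.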
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorATan2
- (
- FXMVECTOR Y,
- FXMVECTOR X
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- atan2f(Y.vector4_f32[0], X.vector4_f32[0]),
- atan2f(Y.vector4_f32[1], X.vector4_f32[1]),
- atan2f(Y.vector4_f32[2], X.vector4_f32[2]),
- atan2f(Y.vector4_f32[3], X.vector4_f32[3])
- } } };
- return Result.v;
- #else
- // Return the inverse tangent of Y / X in the range of -Pi to Pi with the following exceptions:
- // Y == 0 and X is Negative -> Pi with the sign of Y
- // Y == 0 and X is Positive -> 0 with the sign of Y
- // Y != 0 and X == 0 -> Pi / 2 with the sign of Y
- // Y != 0 and X is Negative -> atan(y/x) + (PI with the sign of Y)
- // X == -Infinity and Finite Y -> Pi with the sign of Y
- // X == +Infinity and Finite Y -> 0 with the sign of Y
- // Y == Infinity and X is Finite -> Pi / 2 with the sign of Y
- // Y == Infinity and X == -Infinity -> 3Pi / 4 with the sign of Y
- // Y == Infinity and X == +Infinity -> Pi / 4 with the sign of Y
- static const XMVECTORF32 ATan2Constants = { { { XM_PI, XM_PIDIV2, XM_PIDIV4, XM_PI * 3.0f / 4.0f } } };
- XMVECTOR Zero = XMVectorZero();
- XMVECTOR ATanResultValid = XMVectorTrueInt();
- XMVECTOR Pi = XMVectorSplatX(ATan2Constants);
- XMVECTOR PiOverTwo = XMVectorSplatY(ATan2Constants);
- XMVECTOR PiOverFour = XMVectorSplatZ(ATan2Constants);
- XMVECTOR ThreePiOverFour = XMVectorSplatW(ATan2Constants);
- XMVECTOR YEqualsZero = XMVectorEqual(Y, Zero);
- XMVECTOR XEqualsZero = XMVectorEqual(X, Zero);
- XMVECTOR XIsPositive = XMVectorAndInt(X, g_XMNegativeZero.v);
- XIsPositive = XMVectorEqualInt(XIsPositive, Zero);
- XMVECTOR YEqualsInfinity = XMVectorIsInfinite(Y);
- XMVECTOR XEqualsInfinity = XMVectorIsInfinite(X);
- XMVECTOR YSign = XMVectorAndInt(Y, g_XMNegativeZero.v);
- Pi = XMVectorOrInt(Pi, YSign);
- PiOverTwo = XMVectorOrInt(PiOverTwo, YSign);
- PiOverFour = XMVectorOrInt(PiOverFour, YSign);
- ThreePiOverFour = XMVectorOrInt(ThreePiOverFour, YSign);
- XMVECTOR R1 = XMVectorSelect(Pi, YSign, XIsPositive);
- XMVECTOR R2 = XMVectorSelect(ATanResultValid, PiOverTwo, XEqualsZero);
- XMVECTOR R3 = XMVectorSelect(R2, R1, YEqualsZero);
- XMVECTOR R4 = XMVectorSelect(ThreePiOverFour, PiOverFour, XIsPositive);
- XMVECTOR R5 = XMVectorSelect(PiOverTwo, R4, XEqualsInfinity);
- XMVECTOR Result = XMVectorSelect(R3, R5, YEqualsInfinity);
- ATanResultValid = XMVectorEqualInt(Result, ATanResultValid);
- XMVECTOR V = XMVectorDivide(Y, X);
- XMVECTOR R0 = XMVectorATan(V);
- R1 = XMVectorSelect( Pi, g_XMNegativeZero, XIsPositive );
- R2 = XMVectorAdd(R0, R1);
- return XMVectorSelect(Result, R2, ATanResultValid);
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorSinEst
- (
- FXMVECTOR V
- )
- {
- // 7-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- sinf(V.vector4_f32[0]),
- sinf(V.vector4_f32[1]),
- sinf(V.vector4_f32[2]),
- sinf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Force the value within the bounds of pi
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with sin(y) = sin(x).
- uint32x4_t sign = vandq_u32(x, g_XMNegativeZero);
- uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- float32x4_t absx = vabsq_f32( x );
- float32x4_t rflx = vsubq_f32(c, x);
- uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi);
- x = vbslq_f32( comp, x, rflx );
- float32x4_t x2 = vmulq_f32(x, x);
- // Compute polynomial approximation
- const XMVECTOR SEC = g_XMSinCoefficients1;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SEC), 0);
- XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(SEC), 1);
- vConstants = vdupq_lane_f32(vget_low_f32(SEC), 1);
- Result = vmlaq_f32(vConstants, Result, x2);
- Result = vmlaq_f32(g_XMOne, Result, x2);
- Result = vmulq_f32(Result, x);
- return Result;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Force the value within the bounds of pi
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with sin(y) = sin(x).
- __m128 sign = _mm_and_ps(x, g_XMNegativeZero);
- __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- __m128 absx = _mm_andnot_ps(sign, x); // |x|
- __m128 rflx = _mm_sub_ps(c, x);
- __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
- __m128 select0 = _mm_and_ps(comp, x);
- __m128 select1 = _mm_andnot_ps(comp, rflx);
- x = _mm_or_ps(select0, select1);
- __m128 x2 = _mm_mul_ps(x, x);
- // Compute polynomial approximation
- const XMVECTOR SEC = g_XMSinCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( SEC, _MM_SHUFFLE(3, 3, 3, 3) );
- __m128 Result = _mm_mul_ps(vConstants, x2);
- vConstants = XM_PERMUTE_PS( SEC, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( SEC, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- Result = _mm_add_ps(Result, g_XMOne);
- Result = _mm_mul_ps(Result, x);
- return Result;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorCosEst
- (
- FXMVECTOR V
- )
- {
- // 6-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- cosf(V.vector4_f32[0]),
- cosf(V.vector4_f32[1]),
- cosf(V.vector4_f32[2]),
- cosf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Map V to x in [-pi,pi].
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
- uint32x4_t sign = vandq_u32(x, g_XMNegativeZero);
- uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- float32x4_t absx = vabsq_f32( x );
- float32x4_t rflx = vsubq_f32(c, x);
- uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi);
- x = vbslq_f32( comp, x, rflx );
- sign = vbslq_f32( comp, g_XMOne, g_XMNegativeOne );
- float32x4_t x2 = vmulq_f32(x, x);
- // Compute polynomial approximation
- const XMVECTOR CEC = g_XMCosCoefficients1;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(CEC), 0);
- XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(CEC), 1);
- vConstants = vdupq_lane_f32(vget_low_f32(CEC), 1);
- Result = vmlaq_f32(vConstants, Result, x2);
- Result = vmlaq_f32(g_XMOne, Result, x2);
- Result = vmulq_f32(Result, sign);
- return Result;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Map V to x in [-pi,pi].
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
- XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero);
- __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- __m128 absx = _mm_andnot_ps(sign, x); // |x|
- __m128 rflx = _mm_sub_ps(c, x);
- __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
- __m128 select0 = _mm_and_ps(comp, x);
- __m128 select1 = _mm_andnot_ps(comp, rflx);
- x = _mm_or_ps(select0, select1);
- select0 = _mm_and_ps(comp, g_XMOne);
- select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
- sign = _mm_or_ps(select0, select1);
- __m128 x2 = _mm_mul_ps(x, x);
- // Compute polynomial approximation
- const XMVECTOR CEC = g_XMCosCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( CEC, _MM_SHUFFLE(3, 3, 3, 3) );
- __m128 Result = _mm_mul_ps(vConstants, x2);
- vConstants = XM_PERMUTE_PS( CEC, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( CEC, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- Result = _mm_add_ps(Result, g_XMOne);
- Result = _mm_mul_ps(Result, sign);
- return Result;
- #endif
- }
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVectorSinCosEst
- (
- XMVECTOR* pSin,
- XMVECTOR* pCos,
- FXMVECTOR V
- )
- {
- assert(pSin != nullptr);
- assert(pCos != nullptr);
- // 7/6-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Sin = { { {
- sinf(V.vector4_f32[0]),
- sinf(V.vector4_f32[1]),
- sinf(V.vector4_f32[2]),
- sinf(V.vector4_f32[3])
- } } };
- XMVECTORF32 Cos = { { {
- cosf(V.vector4_f32[0]),
- cosf(V.vector4_f32[1]),
- cosf(V.vector4_f32[2]),
- cosf(V.vector4_f32[3])
- } } };
- *pSin = Sin.v;
- *pCos = Cos.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Force the value within the bounds of pi
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
- uint32x4_t sign = vandq_u32(x, g_XMNegativeZero);
- uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- float32x4_t absx = vabsq_f32( x );
- float32x4_t rflx = vsubq_f32(c, x);
- uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi);
- x = vbslq_f32( comp, x, rflx );
- sign = vbslq_f32( comp, g_XMOne, g_XMNegativeOne );
- float32x4_t x2 = vmulq_f32(x, x);
- // Compute polynomial approximation for sine
- const XMVECTOR SEC = g_XMSinCoefficients1;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SEC), 0);
- XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(SEC), 1);
- vConstants = vdupq_lane_f32(vget_low_f32(SEC), 1);
- Result = vmlaq_f32(vConstants, Result, x2);
- Result = vmlaq_f32(g_XMOne, Result, x2);
- *pSin = vmulq_f32(Result, x);
- // Compute polynomial approximation for cosine
- const XMVECTOR CEC = g_XMCosCoefficients1;
- vConstants = vdupq_lane_f32(vget_high_f32(CEC), 0);
- Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(CEC), 1);
- vConstants = vdupq_lane_f32(vget_low_f32(CEC), 1);
- Result = vmlaq_f32(vConstants, Result, x2);
- Result = vmlaq_f32(g_XMOne, Result, x2);
- *pCos = vmulq_f32(Result, sign);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Force the value within the bounds of pi
- XMVECTOR x = XMVectorModAngles(V);
- // Map in [-pi/2,pi/2] with sin(y) = sin(x), cos(y) = sign*cos(x).
- XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero);
- __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
- __m128 absx = _mm_andnot_ps(sign, x); // |x|
- __m128 rflx = _mm_sub_ps(c, x);
- __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
- __m128 select0 = _mm_and_ps(comp, x);
- __m128 select1 = _mm_andnot_ps(comp, rflx);
- x = _mm_or_ps(select0, select1);
- select0 = _mm_and_ps(comp, g_XMOne);
- select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
- sign = _mm_or_ps(select0, select1);
- __m128 x2 = _mm_mul_ps(x, x);
- // Compute polynomial approximation for sine
- const XMVECTOR SEC = g_XMSinCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( SEC, _MM_SHUFFLE(3, 3, 3, 3) );
- __m128 Result = _mm_mul_ps(vConstants, x2);
- vConstants = XM_PERMUTE_PS( SEC, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( SEC, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- Result = _mm_add_ps(Result, g_XMOne);
- Result = _mm_mul_ps(Result, x);
- *pSin = Result;
- // Compute polynomial approximation for cosine
- const XMVECTOR CEC = g_XMCosCoefficients1;
- vConstants = XM_PERMUTE_PS( CEC, _MM_SHUFFLE(3, 3, 3, 3) );
- Result = _mm_mul_ps(vConstants, x2);
- vConstants = XM_PERMUTE_PS( CEC, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( CEC, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- Result = _mm_add_ps(Result, g_XMOne);
- Result = _mm_mul_ps(Result, sign);
- *pCos = Result;
- #endif
- }
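- // Example (informal): for an angle of pi/4 both outputs are roughly 0.7071 in every lane.
- //   XMVECTOR s, c;
- //   XMVectorSinCosEst(&s, &c, XMVectorReplicate(XM_PIDIV4));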
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorTanEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- tanf(V.vector4_f32[0]),
- tanf(V.vector4_f32[1]),
- tanf(V.vector4_f32[2]),
- tanf(V.vector4_f32[3])
- } } };
- return Result.v;
- #else
- XMVECTOR OneOverPi = XMVectorSplatW(g_XMTanEstCoefficients.v);
- XMVECTOR V1 = XMVectorMultiply(V, OneOverPi);
- V1 = XMVectorRound(V1);
- V1 = XMVectorNegativeMultiplySubtract(g_XMPi.v, V1, V);
- XMVECTOR T0 = XMVectorSplatX(g_XMTanEstCoefficients.v);
- XMVECTOR T1 = XMVectorSplatY(g_XMTanEstCoefficients.v);
- XMVECTOR T2 = XMVectorSplatZ(g_XMTanEstCoefficients.v);
- XMVECTOR V2T2 = XMVectorNegativeMultiplySubtract(V1, V1, T2);
- XMVECTOR V2 = XMVectorMultiply(V1, V1);
- XMVECTOR V1T0 = XMVectorMultiply(V1, T0);
- XMVECTOR V1T1 = XMVectorMultiply(V1, T1);
- XMVECTOR D = XMVectorReciprocalEst(V2T2);
- XMVECTOR N = XMVectorMultiplyAdd(V2, V1T1, V1T0);
- return XMVectorMultiply(N, D);
- #endif
- }
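- // Example (informal): tan(pi/4) is 1, so the estimate returns roughly 1 in every lane.
- //   XMVECTOR t = XMVectorTanEst(XMVectorReplicate(XM_PIDIV4));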
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorASinEst
- (
- FXMVECTOR V
- )
- {
- // 3-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result;
- Result.f[0] = asinf( V.vector4_f32[0] );
- Result.f[1] = asinf( V.vector4_f32[1] );
- Result.f[2] = asinf( V.vector4_f32[2] );
- Result.f[3] = asinf( V.vector4_f32[3] );
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero);
- float32x4_t x = vabsq_f32(V);
- // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
- float32x4_t oneMValue = vsubq_f32(g_XMOne, x);
- float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue);
- float32x4_t root = XMVectorSqrt(clampOneMValue);
- // Compute polynomial approximation
- const XMVECTOR AEC = g_XMArcEstCoefficients;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AEC), 0);
- XMVECTOR t0 = vmlaq_lane_f32( vConstants, x, vget_high_f32(AEC), 1 );
- vConstants = vdupq_lane_f32(vget_low_f32(AEC), 1);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_low_f32(AEC), 0);
- t0 = vmlaq_f32( vConstants, t0, x );
- t0 = vmulq_f32(t0, root);
- float32x4_t t1 = vsubq_f32(g_XMPi, t0);
- t0 = vbslq_f32( nonnegative, t0, t1 );
- t0 = vsubq_f32(g_XMHalfPi, t0);
- return t0;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero);
- __m128 mvalue = _mm_sub_ps(g_XMZero, V);
- __m128 x = _mm_max_ps(V, mvalue); // |V|
- // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
- __m128 oneMValue = _mm_sub_ps(g_XMOne, x);
- __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue);
- __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|)
- // Compute polynomial approximation
- const XMVECTOR AEC = g_XMArcEstCoefficients;
- XMVECTOR vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(3, 3, 3, 3) );
- __m128 t0 = _mm_mul_ps(vConstants, x);
- vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(2, 2, 2, 2) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(1, 1, 1, 1) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(0, 0, 0, 0) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, root);
- __m128 t1 = _mm_sub_ps(g_XMPi, t0);
- t0 = _mm_and_ps(nonnegative, t0);
- t1 = _mm_andnot_ps(nonnegative, t1);
- t0 = _mm_or_ps(t0, t1);
- t0 = _mm_sub_ps(g_XMHalfPi, t0);
- return t0;
- #endif
- }
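- // Worked check (informal): for V = 1 the sqrt(1-|V|) factor is zero, so the polynomial
- // contributes nothing and the function returns pi/2, matching asin(1).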
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorACosEst
- (
- FXMVECTOR V
- )
- {
- // 3-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- acosf(V.vector4_f32[0]),
- acosf(V.vector4_f32[1]),
- acosf(V.vector4_f32[2]),
- acosf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero);
- float32x4_t x = vabsq_f32(V);
- // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
- float32x4_t oneMValue = vsubq_f32(g_XMOne, x);
- float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue);
- float32x4_t root = XMVectorSqrt(clampOneMValue);
- // Compute polynomial approximation
- const XMVECTOR AEC = g_XMArcEstCoefficients;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AEC), 0);
- XMVECTOR t0 = vmlaq_lane_f32( vConstants, x, vget_high_f32(AEC), 1 );
- vConstants = vdupq_lane_f32(vget_low_f32(AEC), 1);
- t0 = vmlaq_f32( vConstants, t0, x );
- vConstants = vdupq_lane_f32(vget_low_f32(AEC), 0);
- t0 = vmlaq_f32( vConstants, t0, x );
- t0 = vmulq_f32(t0, root);
- float32x4_t t1 = vsubq_f32(g_XMPi, t0);
- t0 = vbslq_f32( nonnegative, t0, t1 );
- return t0;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero);
- __m128 mvalue = _mm_sub_ps(g_XMZero, V);
- __m128 x = _mm_max_ps(V, mvalue); // |V|
- // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
- __m128 oneMValue = _mm_sub_ps(g_XMOne, x);
- __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue);
- __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|)
- // Compute polynomial approximation
- const XMVECTOR AEC = g_XMArcEstCoefficients;
- XMVECTOR vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(3, 3, 3, 3) );
- __m128 t0 = _mm_mul_ps(vConstants, x);
- vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(2, 2, 2, 2) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(1, 1, 1, 1) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, x);
- vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(0, 0, 0, 0) );
- t0 = _mm_add_ps(t0, vConstants);
- t0 = _mm_mul_ps(t0, root);
- __m128 t1 = _mm_sub_ps(g_XMPi, t0);
- t0 = _mm_and_ps(nonnegative, t0);
- t1 = _mm_andnot_ps(nonnegative, t1);
- t0 = _mm_or_ps(t0, t1);
- return t0;
- #endif
- }
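- // Worked check (informal): for V = 1 the sqrt(1-|V|) factor is again zero and the
- // non-negative branch is selected, so the function returns 0, matching acos(1).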
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorATanEst
- (
- FXMVECTOR V
- )
- {
- // 9-degree minimax approximation
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- atanf(V.vector4_f32[0]),
- atanf(V.vector4_f32[1]),
- atanf(V.vector4_f32[2]),
- atanf(V.vector4_f32[3])
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x4_t absV = vabsq_f32(V);
- float32x4_t invV = XMVectorReciprocalEst(V);
- uint32x4_t comp = vcgtq_f32(V, g_XMOne);
- uint32x4_t sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne );
- comp = vcleq_f32(absV, g_XMOne);
- sign = vbslq_f32(comp, g_XMZero, sign );
- uint32x4_t x = vbslq_f32(comp, V, invV );
- float32x4_t x2 = vmulq_f32(x, x);
- // Compute polynomial approximation
- const XMVECTOR AEC = g_XMATanEstCoefficients1;
- XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AEC), 0);
- XMVECTOR Result = vmlaq_lane_f32( vConstants, x2, vget_high_f32(AEC), 1 );
- vConstants = vdupq_lane_f32(vget_low_f32(AEC), 1);
- Result = vmlaq_f32( vConstants, Result, x2 );
- vConstants = vdupq_lane_f32(vget_low_f32( AEC), 0);
- Result = vmlaq_f32( vConstants, Result, x2 );
- // ATanEstCoefficients0 is already splatted
- Result = vmlaq_f32( g_XMATanEstCoefficients0, Result, x2 );
- Result = vmulq_f32( Result, x );
- float32x4_t result1 = vmulq_f32(sign, g_XMHalfPi);
- result1 = vsubq_f32(result1, Result);
- comp = vceqq_f32(sign, g_XMZero);
- Result = vbslq_f32( comp, Result, result1 );
- return Result;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128 absV = XMVectorAbs(V);
- __m128 invV = _mm_div_ps(g_XMOne, V);
- __m128 comp = _mm_cmpgt_ps(V, g_XMOne);
- __m128 select0 = _mm_and_ps(comp, g_XMOne);
- __m128 select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
- __m128 sign = _mm_or_ps(select0, select1);
- comp = _mm_cmple_ps(absV, g_XMOne);
- select0 = _mm_and_ps(comp, g_XMZero);
- select1 = _mm_andnot_ps(comp, sign);
- sign = _mm_or_ps(select0, select1);
- select0 = _mm_and_ps(comp, V);
- select1 = _mm_andnot_ps(comp, invV);
- __m128 x = _mm_or_ps(select0, select1);
- __m128 x2 = _mm_mul_ps(x, x);
- // Compute polynomial approximation
- const XMVECTOR AEC = g_XMATanEstCoefficients1;
- XMVECTOR vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(3, 3, 3, 3) );
- __m128 Result = _mm_mul_ps(vConstants, x2);
- vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(2, 2, 2, 2) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(1, 1, 1, 1) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- vConstants = XM_PERMUTE_PS( AEC, _MM_SHUFFLE(0, 0, 0, 0) );
- Result = _mm_add_ps(Result, vConstants);
- Result = _mm_mul_ps(Result, x2);
- // ATanEstCoefficients0 is already splatted
- Result = _mm_add_ps(Result, g_XMATanEstCoefficients0);
- Result = _mm_mul_ps(Result, x);
- __m128 result1 = _mm_mul_ps(sign, g_XMHalfPi);
- result1 = _mm_sub_ps(result1, Result);
- comp = _mm_cmpeq_ps(sign, g_XMZero);
- select0 = _mm_and_ps(comp, Result);
- select1 = _mm_andnot_ps(comp, result1);
- Result = _mm_or_ps(select0, select1);
- return Result;
- #endif
- }
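- // Example (informal): for |V| <= 1 the polynomial is applied to V directly; for |V| > 1
- // it is applied to 1/V using the identity atan(x) = sign(x)*pi/2 - atan(1/x).
- //   XMVECTOR a = XMVectorATanEst(g_XMOne);   // roughly pi/4 in every lane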
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorATan2Est
- (
- FXMVECTOR Y,
- FXMVECTOR X
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- atan2f(Y.vector4_f32[0], X.vector4_f32[0]),
- atan2f(Y.vector4_f32[1], X.vector4_f32[1]),
- atan2f(Y.vector4_f32[2], X.vector4_f32[2]),
- atan2f(Y.vector4_f32[3], X.vector4_f32[3]),
- } } };
- return Result.v;
- #else
- static const XMVECTORF32 ATan2Constants = { { { XM_PI, XM_PIDIV2, XM_PIDIV4, 2.3561944905f /* Pi*3/4 */ } } };
- const XMVECTOR Zero = XMVectorZero();
- XMVECTOR ATanResultValid = XMVectorTrueInt();
- XMVECTOR Pi = XMVectorSplatX(ATan2Constants);
- XMVECTOR PiOverTwo = XMVectorSplatY(ATan2Constants);
- XMVECTOR PiOverFour = XMVectorSplatZ(ATan2Constants);
- XMVECTOR ThreePiOverFour = XMVectorSplatW(ATan2Constants);
- XMVECTOR YEqualsZero = XMVectorEqual(Y, Zero);
- XMVECTOR XEqualsZero = XMVectorEqual(X, Zero);
- XMVECTOR XIsPositive = XMVectorAndInt(X, g_XMNegativeZero.v);
- XIsPositive = XMVectorEqualInt(XIsPositive, Zero);
- XMVECTOR YEqualsInfinity = XMVectorIsInfinite(Y);
- XMVECTOR XEqualsInfinity = XMVectorIsInfinite(X);
- XMVECTOR YSign = XMVectorAndInt(Y, g_XMNegativeZero.v);
- Pi = XMVectorOrInt(Pi, YSign);
- PiOverTwo = XMVectorOrInt(PiOverTwo, YSign);
- PiOverFour = XMVectorOrInt(PiOverFour, YSign);
- ThreePiOverFour = XMVectorOrInt(ThreePiOverFour, YSign);
- XMVECTOR R1 = XMVectorSelect(Pi, YSign, XIsPositive);
- XMVECTOR R2 = XMVectorSelect(ATanResultValid, PiOverTwo, XEqualsZero);
- XMVECTOR R3 = XMVectorSelect(R2, R1, YEqualsZero);
- XMVECTOR R4 = XMVectorSelect(ThreePiOverFour, PiOverFour, XIsPositive);
- XMVECTOR R5 = XMVectorSelect(PiOverTwo, R4, XEqualsInfinity);
- XMVECTOR Result = XMVectorSelect(R3, R5, YEqualsInfinity);
- ATanResultValid = XMVectorEqualInt(Result, ATanResultValid);
- XMVECTOR Reciprocal = XMVectorReciprocalEst(X);
- XMVECTOR V = XMVectorMultiply(Y, Reciprocal);
- XMVECTOR R0 = XMVectorATanEst(V);
- R1 = XMVectorSelect( Pi, g_XMNegativeZero, XIsPositive );
- R2 = XMVectorAdd(R0, R1);
- Result = XMVectorSelect(Result, R2, ATanResultValid);
- return Result;
- #endif
- }
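- // Example (informal): the fix-up above adds +/-pi when X is negative, so
- //   XMVectorATan2Est(g_XMOne, g_XMNegativeOne)   // roughly 3*pi/4 in every lane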
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorLerp
- (
- FXMVECTOR V0,
- FXMVECTOR V1,
- float t
- )
- {
- // V0 + t * (V1 - V0)
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Scale = XMVectorReplicate(t);
- XMVECTOR Length = XMVectorSubtract(V1, V0);
- return XMVectorMultiplyAdd(Length, Scale, V0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR L = vsubq_f32( V1, V0 );
- return vmlaq_n_f32( V0, L, t );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR L = _mm_sub_ps( V1, V0 );
- XMVECTOR S = _mm_set_ps1( t );
- XMVECTOR Result = _mm_mul_ps( L, S );
- return _mm_add_ps( Result, V0 );
- #endif
- }
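- // Example (informal): t = 0 returns V0, t = 1 returns V1, and t = 0.25 returns a quarter
- // of the way from V0 to V1, e.g. lerp((0,0,0,0), (4,8,-4,0), 0.25f) = (1,2,-1,0).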
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorLerpV
- (
- FXMVECTOR V0,
- FXMVECTOR V1,
- FXMVECTOR T
- )
- {
- // V0 + T * (V1 - V0)
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Length = XMVectorSubtract(V1, V0);
- return XMVectorMultiplyAdd(Length, T, V0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR L = vsubq_f32( V1, V0 );
- return vmlaq_f32( V0, L, T );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR Length = _mm_sub_ps( V1, V0 );
- XMVECTOR Result = _mm_mul_ps( Length, T );
- return _mm_add_ps( Result, V0 );
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorHermite
- (
- FXMVECTOR Position0,
- FXMVECTOR Tangent0,
- FXMVECTOR Position1,
- GXMVECTOR Tangent1,
- float t
- )
- {
- // Result = (2 * t^3 - 3 * t^2 + 1) * Position0 +
- // (t^3 - 2 * t^2 + t) * Tangent0 +
- // (-2 * t^3 + 3 * t^2) * Position1 +
- // (t^3 - t^2) * Tangent1
- #if defined(_XM_NO_INTRINSICS_)
- float t2 = t * t;
- float t3 = t * t2;
- XMVECTOR P0 = XMVectorReplicate(2.0f * t3 - 3.0f * t2 + 1.0f);
- XMVECTOR T0 = XMVectorReplicate(t3 - 2.0f * t2 + t);
- XMVECTOR P1 = XMVectorReplicate(-2.0f * t3 + 3.0f * t2);
- XMVECTOR T1 = XMVectorReplicate(t3 - t2);
- XMVECTOR Result = XMVectorMultiply(P0, Position0);
- Result = XMVectorMultiplyAdd(T0, Tangent0, Result);
- Result = XMVectorMultiplyAdd(P1, Position1, Result);
- Result = XMVectorMultiplyAdd(T1, Tangent1, Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float t2 = t * t;
- float t3 = t * t2;
- float p0 = 2.0f * t3 - 3.0f * t2 + 1.0f;
- float t0 = t3 - 2.0f * t2 + t;
- float p1 = -2.0f * t3 + 3.0f * t2;
- float t1 = t3 - t2;
- XMVECTOR vResult = vmulq_n_f32(Position0, p0 );
- vResult = vmlaq_n_f32( vResult, Tangent0, t0 );
- vResult = vmlaq_n_f32( vResult, Position1, p1 );
- vResult = vmlaq_n_f32( vResult, Tangent1, t1 );
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- float t2 = t * t;
- float t3 = t * t2;
- XMVECTOR P0 = _mm_set_ps1(2.0f * t3 - 3.0f * t2 + 1.0f);
- XMVECTOR T0 = _mm_set_ps1(t3 - 2.0f * t2 + t);
- XMVECTOR P1 = _mm_set_ps1(-2.0f * t3 + 3.0f * t2);
- XMVECTOR T1 = _mm_set_ps1(t3 - t2);
- XMVECTOR vResult = _mm_mul_ps(P0, Position0);
- XMVECTOR vTemp = _mm_mul_ps(T0, Tangent0);
- vResult = _mm_add_ps(vResult,vTemp);
- vTemp = _mm_mul_ps(P1, Position1);
- vResult = _mm_add_ps(vResult,vTemp);
- vTemp = _mm_mul_ps(T1, Tangent1);
- vResult = _mm_add_ps(vResult,vTemp);
- return vResult;
- #endif
- }
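- // Worked check (informal): at t = 0 the basis weights are (1,0,0,0) so the result is
- // Position0, and at t = 1 they are (0,0,1,0) so the result is Position1; the tangents
- // only shape the curve in between.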
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorHermiteV
- (
- FXMVECTOR Position0,
- FXMVECTOR Tangent0,
- FXMVECTOR Position1,
- GXMVECTOR Tangent1,
- HXMVECTOR T
- )
- {
- // Result = (2 * t^3 - 3 * t^2 + 1) * Position0 +
- // (t^3 - 2 * t^2 + t) * Tangent0 +
- // (-2 * t^3 + 3 * t^2) * Position1 +
- // (t^3 - t^2) * Tangent1
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR T2 = XMVectorMultiply(T, T);
- XMVECTOR T3 = XMVectorMultiply(T , T2);
- XMVECTOR P0 = XMVectorReplicate(2.0f * T3.vector4_f32[0] - 3.0f * T2.vector4_f32[0] + 1.0f);
- XMVECTOR T0 = XMVectorReplicate(T3.vector4_f32[1] - 2.0f * T2.vector4_f32[1] + T.vector4_f32[1]);
- XMVECTOR P1 = XMVectorReplicate(-2.0f * T3.vector4_f32[2] + 3.0f * T2.vector4_f32[2]);
- XMVECTOR T1 = XMVectorReplicate(T3.vector4_f32[3] - T2.vector4_f32[3]);
- XMVECTOR Result = XMVectorMultiply(P0, Position0);
- Result = XMVectorMultiplyAdd(T0, Tangent0, Result);
- Result = XMVectorMultiplyAdd(P1, Position1, Result);
- Result = XMVectorMultiplyAdd(T1, Tangent1, Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- static const XMVECTORF32 CatMulT2 = { { { -3.0f, -2.0f, 3.0f, -1.0f } } };
- static const XMVECTORF32 CatMulT3 = { { { 2.0f, 1.0f, -2.0f, 1.0f } } };
- XMVECTOR T2 = vmulq_f32(T,T);
- XMVECTOR T3 = vmulq_f32(T,T2);
- // Mul by the constants against t^2
- T2 = vmulq_f32(T2,CatMulT2);
- // Mul by the constants against t^3
- T3 = vmlaq_f32(T2, T3, CatMulT3 );
- // T3 now has the pre-result.
- // I need to add t.y only
- T2 = vandq_u32(T,g_XMMaskY);
- T3 = vaddq_f32(T3,T2);
- // Add 1.0f to x
- T3 = vaddq_f32(T3,g_XMIdentityR0);
- // Now, I have the constants created
- // Mul the x constant to Position0
- XMVECTOR vResult = vmulq_lane_f32( Position0, vget_low_f32( T3 ), 0 ); // T3[0]
- // Mul the y constant to Tangent0
- vResult = vmlaq_lane_f32(vResult, Tangent0, vget_low_f32( T3 ), 1 ); // T3[1]
- // Mul the z constant to Position1
- vResult = vmlaq_lane_f32(vResult, Position1, vget_high_f32( T3 ), 0 ); // T3[2]
- // Mul the w constant to Tangent1
- vResult = vmlaq_lane_f32(vResult, Tangent1, vget_high_f32( T3 ), 1 ); // T3[3]
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- static const XMVECTORF32 CatMulT2 = { { { -3.0f, -2.0f, 3.0f, -1.0f } } };
- static const XMVECTORF32 CatMulT3 = { { { 2.0f, 1.0f, -2.0f, 1.0f } } };
- XMVECTOR T2 = _mm_mul_ps(T,T);
- XMVECTOR T3 = _mm_mul_ps(T,T2);
- // Mul by the constants against t^2
- T2 = _mm_mul_ps(T2,CatMulT2);
- // Mul by the constants against t^3
- T3 = _mm_mul_ps(T3,CatMulT3);
- // T3 now has the pre-result.
- T3 = _mm_add_ps(T3,T2);
- // I need to add t.y only
- T2 = _mm_and_ps(T,g_XMMaskY);
- T3 = _mm_add_ps(T3,T2);
- // Add 1.0f to x
- T3 = _mm_add_ps(T3,g_XMIdentityR0);
- // Now, I have the constants created
- // Mul the x constant to Position0
- XMVECTOR vResult = XM_PERMUTE_PS(T3,_MM_SHUFFLE(0,0,0,0));
- vResult = _mm_mul_ps(vResult,Position0);
- // Mul the y constant to Tangent0
- T2 = XM_PERMUTE_PS(T3,_MM_SHUFFLE(1,1,1,1));
- T2 = _mm_mul_ps(T2,Tangent0);
- vResult = _mm_add_ps(vResult,T2);
- // Mul the z constant to Position1
- T2 = XM_PERMUTE_PS(T3,_MM_SHUFFLE(2,2,2,2));
- T2 = _mm_mul_ps(T2,Position1);
- vResult = _mm_add_ps(vResult,T2);
- // Mul the w constant to Tangent1
- T3 = XM_PERMUTE_PS(T3,_MM_SHUFFLE(3,3,3,3));
- T3 = _mm_mul_ps(T3,Tangent1);
- vResult = _mm_add_ps(vResult,T3);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorCatmullRom
- (
- FXMVECTOR Position0,
- FXMVECTOR Position1,
- FXMVECTOR Position2,
- GXMVECTOR Position3,
- float t
- )
- {
- // Result = ((-t^3 + 2 * t^2 - t) * Position0 +
- // (3 * t^3 - 5 * t^2 + 2) * Position1 +
- // (-3 * t^3 + 4 * t^2 + t) * Position2 +
- // (t^3 - t^2) * Position3) * 0.5
- #if defined(_XM_NO_INTRINSICS_)
- float t2 = t * t;
- float t3 = t * t2;
- XMVECTOR P0 = XMVectorReplicate((-t3 + 2.0f * t2 - t) * 0.5f);
- XMVECTOR P1 = XMVectorReplicate((3.0f * t3 - 5.0f * t2 + 2.0f) * 0.5f);
- XMVECTOR P2 = XMVectorReplicate((-3.0f * t3 + 4.0f * t2 + t) * 0.5f);
- XMVECTOR P3 = XMVectorReplicate((t3 - t2) * 0.5f);
- XMVECTOR Result = XMVectorMultiply(P0, Position0);
- Result = XMVectorMultiplyAdd(P1, Position1, Result);
- Result = XMVectorMultiplyAdd(P2, Position2, Result);
- Result = XMVectorMultiplyAdd(P3, Position3, Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float t2 = t * t;
- float t3 = t * t2;
- float p0 = (-t3 + 2.0f * t2 - t) * 0.5f;
- float p1 = (3.0f * t3 - 5.0f * t2 + 2.0f) * 0.5f;
- float p2 = (-3.0f * t3 + 4.0f * t2 + t) * 0.5f;
- float p3 = (t3 - t2) * 0.5f;
- XMVECTOR P1 = vmulq_n_f32(Position1, p1);
- XMVECTOR P0 = vmlaq_n_f32(P1, Position0, p0);
- XMVECTOR P3 = vmulq_n_f32(Position3, p3);
- XMVECTOR P2 = vmlaq_n_f32(P3, Position2, p2);
- P0 = vaddq_f32(P0,P2);
- return P0;
- #elif defined(_XM_SSE_INTRINSICS_)
- float t2 = t * t;
- float t3 = t * t2;
- XMVECTOR P0 = _mm_set_ps1((-t3 + 2.0f * t2 - t) * 0.5f);
- XMVECTOR P1 = _mm_set_ps1((3.0f * t3 - 5.0f * t2 + 2.0f) * 0.5f);
- XMVECTOR P2 = _mm_set_ps1((-3.0f * t3 + 4.0f * t2 + t) * 0.5f);
- XMVECTOR P3 = _mm_set_ps1((t3 - t2) * 0.5f);
- P0 = _mm_mul_ps(P0, Position0);
- P1 = _mm_mul_ps(P1, Position1);
- P2 = _mm_mul_ps(P2, Position2);
- P3 = _mm_mul_ps(P3, Position3);
- P0 = _mm_add_ps(P0,P1);
- P2 = _mm_add_ps(P2,P3);
- P0 = _mm_add_ps(P0,P2);
- return P0;
- #endif
- }
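- // Worked check (informal): at t = 0 the weights reduce to (0,1,0,0) and at t = 1 to
- // (0,0,1,0), so the spline passes through Position1 and Position2; Position0 and
- // Position3 only influence the tangents.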
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorCatmullRomV
- (
- FXMVECTOR Position0,
- FXMVECTOR Position1,
- FXMVECTOR Position2,
- GXMVECTOR Position3,
- HXMVECTOR T
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float fx = T.vector4_f32[0];
- float fy = T.vector4_f32[1];
- float fz = T.vector4_f32[2];
- float fw = T.vector4_f32[3];
- XMVECTORF32 vResult = { { {
- 0.5f*((-fx*fx*fx + 2 * fx*fx - fx)*Position0.vector4_f32[0]
- + (3 * fx*fx*fx - 5 * fx*fx + 2)*Position1.vector4_f32[0]
- + (-3 * fx*fx*fx + 4 * fx*fx + fx)*Position2.vector4_f32[0]
- + (fx*fx*fx - fx*fx)*Position3.vector4_f32[0]),
- 0.5f*((-fy*fy*fy + 2 * fy*fy - fy)*Position0.vector4_f32[1]
- + (3 * fy*fy*fy - 5 * fy*fy + 2)*Position1.vector4_f32[1]
- + (-3 * fy*fy*fy + 4 * fy*fy + fy)*Position2.vector4_f32[1]
- + (fy*fy*fy - fy*fy)*Position3.vector4_f32[1]),
- 0.5f*((-fz*fz*fz + 2 * fz*fz - fz)*Position0.vector4_f32[2]
- + (3 * fz*fz*fz - 5 * fz*fz + 2)*Position1.vector4_f32[2]
- + (-3 * fz*fz*fz + 4 * fz*fz + fz)*Position2.vector4_f32[2]
- + (fz*fz*fz - fz*fz)*Position3.vector4_f32[2]),
- 0.5f*((-fw*fw*fw + 2 * fw*fw - fw)*Position0.vector4_f32[3]
- + (3 * fw*fw*fw - 5 * fw*fw + 2)*Position1.vector4_f32[3]
- + (-3 * fw*fw*fw + 4 * fw*fw + fw)*Position2.vector4_f32[3]
- + (fw*fw*fw - fw*fw)*Position3.vector4_f32[3])
- } } };
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- static const XMVECTORF32 Catmul2 = { { { 2.0f, 2.0f, 2.0f, 2.0f } } };
- static const XMVECTORF32 Catmul3 = { { { 3.0f, 3.0f, 3.0f, 3.0f } } };
- static const XMVECTORF32 Catmul4 = { { { 4.0f, 4.0f, 4.0f, 4.0f } } };
- static const XMVECTORF32 Catmul5 = { { { 5.0f, 5.0f, 5.0f, 5.0f } } };
- // Cache T^2 and T^3
- XMVECTOR T2 = vmulq_f32(T,T);
- XMVECTOR T3 = vmulq_f32(T,T2);
- // Perform the Position0 term
- XMVECTOR vResult = vaddq_f32(T2,T2);
- vResult = vsubq_f32(vResult,T);
- vResult = vsubq_f32(vResult,T3);
- vResult = vmulq_f32(vResult,Position0);
- // Perform the Position1 term and add
- XMVECTOR vTemp = vmulq_f32(T3,Catmul3);
- vTemp = vmlsq_f32(vTemp, T2, Catmul5);
- vTemp = vaddq_f32(vTemp,Catmul2);
- vResult = vmlaq_f32(vResult, vTemp, Position1);
- // Perform the Position2 term and add
- vTemp = vmulq_f32(T2,Catmul4);
- vTemp = vmlsq_f32(vTemp, T3, Catmul3);
- vTemp = vaddq_f32(vTemp,T);
- vResult = vmlaq_f32(vResult, vTemp, Position2);
- // Position3 is the last term
- T3 = vsubq_f32(T3,T2);
- vResult = vmlaq_f32(vResult, T3, Position3);
- // Multiply by 0.5f and exit
- vResult = vmulq_f32(vResult,g_XMOneHalf);
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- static const XMVECTORF32 Catmul2 = { { { 2.0f, 2.0f, 2.0f, 2.0f } } };
- static const XMVECTORF32 Catmul3 = { { { 3.0f, 3.0f, 3.0f, 3.0f } } };
- static const XMVECTORF32 Catmul4 = { { { 4.0f, 4.0f, 4.0f, 4.0f } } };
- static const XMVECTORF32 Catmul5 = { { { 5.0f, 5.0f, 5.0f, 5.0f } } };
- // Cache T^2 and T^3
- XMVECTOR T2 = _mm_mul_ps(T,T);
- XMVECTOR T3 = _mm_mul_ps(T,T2);
- // Perform the Position0 term
- XMVECTOR vResult = _mm_add_ps(T2,T2);
- vResult = _mm_sub_ps(vResult,T);
- vResult = _mm_sub_ps(vResult,T3);
- vResult = _mm_mul_ps(vResult,Position0);
- // Perform the Position1 term and add
- XMVECTOR vTemp = _mm_mul_ps(T3,Catmul3);
- XMVECTOR vTemp2 = _mm_mul_ps(T2,Catmul5);
- vTemp = _mm_sub_ps(vTemp,vTemp2);
- vTemp = _mm_add_ps(vTemp,Catmul2);
- vTemp = _mm_mul_ps(vTemp,Position1);
- vResult = _mm_add_ps(vResult,vTemp);
- // Perform the Position2 term and add
- vTemp = _mm_mul_ps(T2,Catmul4);
- vTemp2 = _mm_mul_ps(T3,Catmul3);
- vTemp = _mm_sub_ps(vTemp,vTemp2);
- vTemp = _mm_add_ps(vTemp,T);
- vTemp = _mm_mul_ps(vTemp,Position2);
- vResult = _mm_add_ps(vResult,vTemp);
- // Position3 is the last term
- T3 = _mm_sub_ps(T3,T2);
- T3 = _mm_mul_ps(T3,Position3);
- vResult = _mm_add_ps(vResult,T3);
- // Multiply by 0.5f and exit
- vResult = _mm_mul_ps(vResult,g_XMOneHalf);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorBaryCentric
- (
- FXMVECTOR Position0,
- FXMVECTOR Position1,
- FXMVECTOR Position2,
- float f,
- float g
- )
- {
- // Result = Position0 + f * (Position1 - Position0) + g * (Position2 - Position0)
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR P10 = XMVectorSubtract(Position1, Position0);
- XMVECTOR ScaleF = XMVectorReplicate(f);
- XMVECTOR P20 = XMVectorSubtract(Position2, Position0);
- XMVECTOR ScaleG = XMVectorReplicate(g);
- XMVECTOR Result = XMVectorMultiplyAdd(P10, ScaleF, Position0);
- Result = XMVectorMultiplyAdd(P20, ScaleG, Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR R1 = vsubq_f32(Position1,Position0);
- XMVECTOR R2 = vsubq_f32(Position2,Position0);
- R1 = vmlaq_n_f32( Position0, R1, f);
- return vmlaq_n_f32( R1, R2, g );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR R1 = _mm_sub_ps(Position1,Position0);
- XMVECTOR SF = _mm_set_ps1(f);
- XMVECTOR R2 = _mm_sub_ps(Position2,Position0);
- XMVECTOR SG = _mm_set_ps1(g);
- R1 = _mm_mul_ps(R1,SF);
- R2 = _mm_mul_ps(R2,SG);
- R1 = _mm_add_ps(R1,Position0);
- R1 = _mm_add_ps(R1,R2);
- return R1;
- #endif
- }
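- // Example (informal): (f,g) = (0,0) returns Position0, (1,0) returns Position1 and
- // (0,1) returns Position2; points inside the triangle have f >= 0, g >= 0, f + g <= 1.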
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVectorBaryCentricV
- (
- FXMVECTOR Position0,
- FXMVECTOR Position1,
- FXMVECTOR Position2,
- GXMVECTOR F,
- HXMVECTOR G
- )
- {
- // Result = Position0 + f * (Position1 - Position0) + g * (Position2 - Position0)
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR P10 = XMVectorSubtract(Position1, Position0);
- XMVECTOR P20 = XMVectorSubtract(Position2, Position0);
- XMVECTOR Result = XMVectorMultiplyAdd(P10, F, Position0);
- Result = XMVectorMultiplyAdd(P20, G, Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR R1 = vsubq_f32(Position1,Position0);
- XMVECTOR R2 = vsubq_f32(Position2,Position0);
- R1 = vmlaq_f32( Position0, R1, F );
- return vmlaq_f32( R1, R2, G);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR R1 = _mm_sub_ps(Position1,Position0);
- XMVECTOR R2 = _mm_sub_ps(Position2,Position0);
- R1 = _mm_mul_ps(R1,F);
- R2 = _mm_mul_ps(R2,G);
- R1 = _mm_add_ps(R1,Position0);
- R1 = _mm_add_ps(R1,R2);
- return R1;
- #endif
- }
- /****************************************************************************
- *
- * 2D Vector
- *
- ****************************************************************************/
- //------------------------------------------------------------------------------
- // Comparison operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2Equal
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vceq_f32( vget_low_f32(V1), vget_low_f32(V2) );
- return ( vget_lane_u64( vTemp, 0 ) == 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpeq_ps(V1,V2);
- // z and w are don't care
- return (((_mm_movemask_ps(vTemp)&3)==3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector2EqualR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_f32[0] == V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] == V2.vector4_f32[1]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] != V2.vector4_f32[1]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vceq_f32( vget_low_f32(V1), vget_low_f32(V2) );
- uint64_t r = vget_lane_u64( vTemp, 0 );
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFFFFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpeq_ps(V1,V2);
- // z and w are don't care
- int iTest = _mm_movemask_ps(vTemp)&3;
- uint32_t CR = 0;
- if (iTest==3)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
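- // Example (informal): the CR mask is normally consumed with the comparison helpers,
- // e.g. XMComparisonAllTrue(XMVector2EqualR(V1, V2)) tests whether both x and y match.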
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2EqualInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vceq_u32( vget_low_u32(V1), vget_low_u32(V2) );
- return ( vget_lane_u64( vTemp, 0 ) == 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1),_mm_castps_si128(V2));
- return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp))&3)==3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector2EqualIntR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_u32[0] == V2.vector4_u32[0]) &&
- (V1.vector4_u32[1] == V2.vector4_u32[1]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_u32[0] != V2.vector4_u32[0]) &&
- (V1.vector4_u32[1] != V2.vector4_u32[1]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vceq_u32( vget_low_u32(V1), vget_low_u32(V2) );
- uint64_t r = vget_lane_u64( vTemp, 0 );
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFFFFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1),_mm_castps_si128(V2));
- int iTest = _mm_movemask_ps(_mm_castsi128_ps(vTemp))&3;
- uint32_t CR = 0;
- if (iTest==3)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2NearEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- FXMVECTOR Epsilon
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float dx = fabsf(V1.vector4_f32[0]-V2.vector4_f32[0]);
- float dy = fabsf(V1.vector4_f32[1]-V2.vector4_f32[1]);
- return ((dx <= Epsilon.vector4_f32[0]) &&
- (dy <= Epsilon.vector4_f32[1]));
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t vDelta = vsub_f32(vget_low_f32(V1), vget_low_f32(V2));
- uint32x2_t vTemp = vacle_f32( vDelta, vget_low_u32(Epsilon) );
- uint64_t r = vget_lane_u64( vTemp, 0 );
- return ( r == 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Get the difference
- XMVECTOR vDelta = _mm_sub_ps(V1,V2);
- // Get the absolute value of the difference
- XMVECTOR vTemp = _mm_setzero_ps();
- vTemp = _mm_sub_ps(vTemp,vDelta);
- vTemp = _mm_max_ps(vTemp,vDelta);
- vTemp = _mm_cmple_ps(vTemp,Epsilon);
- // z and w are don't care
- return (((_mm_movemask_ps(vTemp)&3)==0x3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2NotEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vceq_f32( vget_low_f32(V1), vget_low_f32(V2) );
- return ( vget_lane_u64( vTemp, 0 ) != 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpeq_ps(V1,V2);
- // z and w are don't care
- return (((_mm_movemask_ps(vTemp)&3)!=3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2NotEqualInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vceq_u32( vget_low_u32(V1), vget_low_u32(V2) );
- return ( vget_lane_u64( vTemp, 0 ) != 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1),_mm_castps_si128(V2));
- return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp))&3)!=3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2Greater
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vcgt_f32( vget_low_f32(V1), vget_low_f32(V2) );
- return ( vget_lane_u64( vTemp, 0 ) == 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpgt_ps(V1,V2);
- // z and w are don't care
- return (((_mm_movemask_ps(vTemp)&3)==3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector2GreaterR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_f32[0] > V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] > V2.vector4_f32[1]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_f32[0] <= V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] <= V2.vector4_f32[1]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vcgt_f32( vget_low_f32(V1), vget_low_f32(V2) );
- uint64_t r = vget_lane_u64( vTemp, 0 );
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFFFFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpgt_ps(V1,V2);
- int iTest = _mm_movemask_ps(vTemp)&3;
- uint32_t CR = 0;
- if (iTest==3)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2GreaterOrEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vcge_f32( vget_low_f32(V1), vget_low_f32(V2) );
- return ( vget_lane_u64( vTemp, 0 ) == 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpge_ps(V1,V2);
- return (((_mm_movemask_ps(vTemp)&3)==3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector2GreaterOrEqualR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] >= V2.vector4_f32[1]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] < V2.vector4_f32[1]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vcge_f32( vget_low_f32(V1), vget_low_f32(V2) );
- uint64_t r = vget_lane_u64( vTemp, 0 );
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFFFFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpge_ps(V1,V2);
- int iTest = _mm_movemask_ps(vTemp)&3;
- uint32_t CR = 0;
- if (iTest == 3)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2Less
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vclt_f32( vget_low_f32(V1), vget_low_f32(V2) );
- return ( vget_lane_u64( vTemp, 0 ) == 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmplt_ps(V1,V2);
- return (((_mm_movemask_ps(vTemp)&3)==3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2LessOrEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x2_t vTemp = vcle_f32( vget_low_f32(V1), vget_low_f32(V2) );
- return ( vget_lane_u64( vTemp, 0 ) == 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmple_ps(V1,V2);
- return (((_mm_movemask_ps(vTemp)&3)==3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2InBounds
- (
- FXMVECTOR V,
- FXMVECTOR Bounds
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) &&
- (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32( V );
- float32x2_t B = vget_low_f32( Bounds );
- // Test if less than or equal
- uint32x2_t ivTemp1 = vcle_f32(VL,B);
- // Negate the bounds
- float32x2_t vTemp2 = vneg_f32(B);
- // Test if greater or equal (Reversed)
- uint32x2_t ivTemp2 = vcle_f32(vTemp2,VL);
- // Blend answers
- ivTemp1 = vand_u32(ivTemp1,ivTemp2);
- // x and y in bounds?
- return ( vget_lane_u64( ivTemp1, 0 ) == 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Test if less than or equal
- XMVECTOR vTemp1 = _mm_cmple_ps(V,Bounds);
- // Negate the bounds
- XMVECTOR vTemp2 = _mm_mul_ps(Bounds,g_XMNegativeOne);
- // Test if greater or equal (Reversed)
- vTemp2 = _mm_cmple_ps(vTemp2,V);
- // Blend answers
- vTemp1 = _mm_and_ps(vTemp1,vTemp2);
- // x and y in bounds? (z and w are don't care)
- return (((_mm_movemask_ps(vTemp1)&0x3)==0x3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2IsNaN
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (XMISNAN(V.vector4_f32[0]) ||
- XMISNAN(V.vector4_f32[1]));
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32( V );
- // Test against itself. NaN is always not equal
- uint32x2_t vTempNan = vceq_f32( VL, VL );
- // If x or y are NaN, the mask is zero
- return ( vget_lane_u64( vTempNan, 0 ) != 0xFFFFFFFFFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Test against itself. NaN is always not equal
- XMVECTOR vTempNan = _mm_cmpneq_ps(V,V);
- // If x or y are NaN, the mask is non-zero
- return ((_mm_movemask_ps(vTempNan)&3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector2IsInfinite
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (XMISINF(V.vector4_f32[0]) ||
- XMISINF(V.vector4_f32[1]));
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Mask off the sign bit
- uint32x2_t vTemp = vand_u32( vget_low_f32( V ) , vget_low_f32( g_XMAbsMask ) );
- // Compare to infinity
- vTemp = vceq_f32(vTemp, vget_low_f32( g_XMInfinity) );
- // If any are infinity, the signs are true.
- return vget_lane_u64( vTemp, 0 ) != 0;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Mask off the sign bit
- __m128 vTemp = _mm_and_ps(V,g_XMAbsMask);
- // Compare to infinity
- vTemp = _mm_cmpeq_ps(vTemp,g_XMInfinity);
- // If x or y are infinity, the signs are true.
- return ((_mm_movemask_ps(vTemp)&3) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- // Computation operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2Dot
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result;
- Result.f[0] =
- Result.f[1] =
- Result.f[2] =
- Result.f[3] = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1];
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Perform the dot product on x and y
- float32x2_t vTemp = vmul_f32( vget_low_f32(V1), vget_low_f32(V2) );
- vTemp = vpadd_f32( vTemp, vTemp );
- return vcombine_f32( vTemp, vTemp );
- #elif defined(_XM_SSE4_INTRINSICS_)
- return _mm_dp_ps( V1, V2, 0x3f );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vDot = _mm_mul_ps(V1, V2);
- vDot = _mm_hadd_ps(vDot, vDot);
- vDot = _mm_moveldup_ps(vDot);
- return vDot;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x and y
- XMVECTOR vLengthSq = _mm_mul_ps(V1,V2);
- // vTemp has y splatted
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,1,1,1));
- // x+y
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- return vLengthSq;
- #endif
- }
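- // Example (informal): only x and y participate, and the scalar result is replicated,
- // e.g. XMVector2Dot on (1,2,*,*) and (3,4,*,*) yields (11,11,11,11).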
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2Cross
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- // [ V1.x*V2.y - V1.y*V2.x, V1.x*V2.y - V1.y*V2.x ]
- #if defined(_XM_NO_INTRINSICS_)
- float fCross = (V1.vector4_f32[0] * V2.vector4_f32[1]) - (V1.vector4_f32[1] * V2.vector4_f32[0]);
- XMVECTORF32 vResult;
- vResult.f[0] =
- vResult.f[1] =
- vResult.f[2] =
- vResult.f[3] = fCross;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- static const XMVECTORF32 Negate = { { { 1.f, -1.f, 0, 0 } } };
- float32x2_t vTemp = vmul_f32( vget_low_f32( V1 ), vrev64_f32( vget_low_f32( V2 ) ) );
- vTemp = vmul_f32( vTemp, vget_low_f32( Negate ) );
- vTemp = vpadd_f32( vTemp, vTemp );
- return vcombine_f32( vTemp, vTemp );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Swap x and y
- XMVECTOR vResult = XM_PERMUTE_PS(V2,_MM_SHUFFLE(0,1,0,1));
- // Perform the muls
- vResult = _mm_mul_ps(vResult,V1);
- // Splat y
- XMVECTOR vTemp = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(1,1,1,1));
- // Sub the values
- vResult = _mm_sub_ss(vResult,vTemp);
- // Splat the cross product
- vResult = XM_PERMUTE_PS(vResult,_MM_SHUFFLE(0,0,0,0));
- return vResult;
- #endif
- }
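- // Example (informal): this is the z component of the 3D cross product, replicated, so
- // XMVector2Cross on (1,0) and (0,1) yields (1,1,1,1).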
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2LengthSq
- (
- FXMVECTOR V
- )
- {
- return XMVector2Dot(V, V);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2ReciprocalLengthEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector2LengthSq(V);
- Result = XMVectorReciprocalSqrtEst(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32(V);
- // Dot2
- float32x2_t vTemp = vmul_f32( VL, VL );
- vTemp = vpadd_f32( vTemp, vTemp );
- // Reciprocal sqrt (estimate)
- vTemp = vrsqrte_f32( vTemp );
- return vcombine_f32( vTemp, vTemp );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x3f );
- return _mm_rsqrt_ps( vTemp );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_rsqrt_ss(vTemp);
- vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x and y
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has y splatted
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,1,1,1));
- // x+y
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- vLengthSq = _mm_rsqrt_ss(vLengthSq);
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2ReciprocalLength
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector2LengthSq(V);
- Result = XMVectorReciprocalSqrt(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32(V);
- // Dot2
- float32x2_t vTemp = vmul_f32( VL, VL );
- vTemp = vpadd_f32( vTemp, vTemp );
- // Reciprocal sqrt
- float32x2_t S0 = vrsqrte_f32(vTemp);
- float32x2_t P0 = vmul_f32( vTemp, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( vTemp, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- float32x2_t Result = vmul_f32( S1, R1 );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x3f );
- XMVECTOR vLengthSq = _mm_sqrt_ps( vTemp );
- return _mm_div_ps( g_XMOne, vLengthSq );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_sqrt_ss(vTemp);
- vLengthSq = _mm_div_ss(g_XMOne, vLengthSq);
- vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x and y
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has y splatted
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,1,1,1));
- // x+y
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- vLengthSq = _mm_sqrt_ss(vLengthSq);
- vLengthSq = _mm_div_ss(g_XMOne,vLengthSq);
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2LengthEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector2LengthSq(V);
- Result = XMVectorSqrtEst(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32(V);
- // Dot2
- float32x2_t vTemp = vmul_f32( VL, VL );
- vTemp = vpadd_f32( vTemp, vTemp );
- const float32x2_t zero = vdup_n_f32(0);
- uint32x2_t VEqualsZero = vceq_f32( vTemp, zero );
- // Sqrt (estimate)
- float32x2_t Result = vrsqrte_f32( vTemp );
- Result = vmul_f32( vTemp, Result );
- Result = vbsl_f32( VEqualsZero, zero, Result );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x3f );
- return _mm_sqrt_ps( vTemp );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_sqrt_ss(vTemp);
- vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x and y
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has y splatted
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,1,1,1));
- // x+y
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- vLengthSq = _mm_sqrt_ss(vLengthSq);
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2Length
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector2LengthSq(V);
- Result = XMVectorSqrt(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32(V);
- // Dot2
- float32x2_t vTemp = vmul_f32( VL, VL );
- vTemp = vpadd_f32( vTemp, vTemp );
- const float32x2_t zero = vdup_n_f32(0);
- uint32x2_t VEqualsZero = vceq_f32( vTemp, zero );
- // Sqrt
- float32x2_t S0 = vrsqrte_f32( vTemp );
- float32x2_t P0 = vmul_f32( vTemp, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( vTemp, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- float32x2_t Result = vmul_f32( S1, R1 );
- Result = vmul_f32( vTemp, Result );
- Result = vbsl_f32( VEqualsZero, zero, Result );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x3f );
- return _mm_sqrt_ps( vTemp );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_sqrt_ss(vTemp);
- vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x and y
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has y splatted
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,1,1,1));
- // x+y
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- // XMVector2NormalizeEst uses a reciprocal estimate and
- // returns QNaN on zero and infinite vectors.
- inline XMVECTOR XM_CALLCONV XMVector2NormalizeEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector2ReciprocalLength(V);
- Result = XMVectorMultiply(V, Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32(V);
- // Dot2
- float32x2_t vTemp = vmul_f32( VL, VL );
- vTemp = vpadd_f32( vTemp, vTemp );
- // Reciprocal sqrt (estimate)
- vTemp = vrsqrte_f32( vTemp );
- // Normalize
- float32x2_t Result = vmul_f32( VL, vTemp );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x3f );
- XMVECTOR vResult = _mm_rsqrt_ps( vTemp );
- return _mm_mul_ps(vResult, V);
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_rsqrt_ss(vLengthSq);
- vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
- vLengthSq = _mm_mul_ps(vLengthSq, V);
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x and y
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has y splatted
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,1,1,1));
- // x+y
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- vLengthSq = _mm_rsqrt_ss(vLengthSq);
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- vLengthSq = _mm_mul_ps(vLengthSq,V);
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2Normalize
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR vResult = XMVector2Length( V );
- float fLength = vResult.vector4_f32[0];
- // Prevent divide by zero
- if (fLength > 0) {
- fLength = 1.0f/fLength;
- }
-
- vResult.vector4_f32[0] = V.vector4_f32[0]*fLength;
- vResult.vector4_f32[1] = V.vector4_f32[1]*fLength;
- vResult.vector4_f32[2] = V.vector4_f32[2]*fLength;
- vResult.vector4_f32[3] = V.vector4_f32[3]*fLength;
- return vResult;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32(V);
- // Dot2
- float32x2_t vTemp = vmul_f32( VL, VL );
- vTemp = vpadd_f32( vTemp, vTemp );
- uint32x2_t VEqualsZero = vceq_f32( vTemp, vdup_n_f32(0) );
- uint32x2_t VEqualsInf = vceq_f32( vTemp, vget_low_f32(g_XMInfinity) );
- // Reciprocal sqrt (2 iterations of Newton-Raphson)
- float32x2_t S0 = vrsqrte_f32( vTemp );
- float32x2_t P0 = vmul_f32( vTemp, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( vTemp, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- vTemp = vmul_f32( S1, R1 );
- // Normalize
- float32x2_t Result = vmul_f32( VL, vTemp );
- Result = vbsl_f32( VEqualsZero, vdup_n_f32(0), Result );
- Result = vbsl_f32( VEqualsInf, vget_low_f32(g_XMQNaN), Result );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_dp_ps( V, V, 0x3f );
- // Prepare for the division
- XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
- // Create zero with a single instruction
- XMVECTOR vZeroMask = _mm_setzero_ps();
- // Test for a divide by zero (Must be FP to detect -0.0)
- vZeroMask = _mm_cmpneq_ps(vZeroMask,vResult);
- // Failsafe on zero (Or epsilon) length planes
- // If the length is infinity, set the elements to zero
- vLengthSq = _mm_cmpneq_ps(vLengthSq,g_XMInfinity);
- // Reciprocal mul to perform the normalization
- vResult = _mm_div_ps(V,vResult);
- // Any that are infinity, set to zero
- vResult = _mm_and_ps(vResult,vZeroMask);
- // Select qnan or result based on infinite length
- XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq,g_XMQNaN);
- XMVECTOR vTemp2 = _mm_and_ps(vResult,vLengthSq);
- vResult = _mm_or_ps(vTemp1,vTemp2);
- return vResult;
- #elif defined(_XM_SSE3_INTRINSICS_)
- // Perform the dot product on x and y only
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_moveldup_ps(vLengthSq);
- // Prepare for the division
- XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
- // Create zero with a single instruction
- XMVECTOR vZeroMask = _mm_setzero_ps();
- // Test for a divide by zero (Must be FP to detect -0.0)
- vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
- // Failsafe on zero (Or epsilon) length planes
- // If the length is infinity, set the elements to zero
- vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity);
- // Reciprocal mul to perform the normalization
- vResult = _mm_div_ps(V, vResult);
- // Any that are infinity, set to zero
- vResult = _mm_and_ps(vResult, vZeroMask);
- // Select qnan or result based on infinite length
- XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN);
- XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq);
- vResult = _mm_or_ps(vTemp1, vTemp2);
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x and y only
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,1,1,1));
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- // Prepare for the division
- XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
- // Create zero with a single instruction
- XMVECTOR vZeroMask = _mm_setzero_ps();
- // Test for a divide by zero (Must be FP to detect -0.0)
- vZeroMask = _mm_cmpneq_ps(vZeroMask,vResult);
- // Failsafe on zero (Or epsilon) length planes
- // If the length is infinity, set the elements to zero
- vLengthSq = _mm_cmpneq_ps(vLengthSq,g_XMInfinity);
- // Reciprocal mul to perform the normalization
- vResult = _mm_div_ps(V,vResult);
- // Any that are infinity, set to zero
- vResult = _mm_and_ps(vResult,vZeroMask);
- // Select qnan or result based on infinite length
- XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq,g_XMQNaN);
- XMVECTOR vTemp2 = _mm_and_ps(vResult,vLengthSq);
- vResult = _mm_or_ps(vTemp1,vTemp2);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2ClampLength
- (
- FXMVECTOR V,
- float LengthMin,
- float LengthMax
- )
- {
- XMVECTOR ClampMax = XMVectorReplicate(LengthMax);
- XMVECTOR ClampMin = XMVectorReplicate(LengthMin);
- return XMVector2ClampLengthV(V, ClampMin, ClampMax);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2ClampLengthV
- (
- FXMVECTOR V,
- FXMVECTOR LengthMin,
- FXMVECTOR LengthMax
- )
- {
- assert((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin)));
- assert((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax)));
- assert(XMVector2GreaterOrEqual(LengthMin, g_XMZero));
- assert(XMVector2GreaterOrEqual(LengthMax, g_XMZero));
- assert(XMVector2GreaterOrEqual(LengthMax, LengthMin));
- XMVECTOR LengthSq = XMVector2LengthSq(V);
- const XMVECTOR Zero = XMVectorZero();
- XMVECTOR RcpLength = XMVectorReciprocalSqrt(LengthSq);
- XMVECTOR InfiniteLength = XMVectorEqualInt(LengthSq, g_XMInfinity.v);
- XMVECTOR ZeroLength = XMVectorEqual(LengthSq, Zero);
- XMVECTOR Length = XMVectorMultiply(LengthSq, RcpLength);
- XMVECTOR Normal = XMVectorMultiply(V, RcpLength);
- XMVECTOR Select = XMVectorEqualInt(InfiniteLength, ZeroLength);
- Length = XMVectorSelect(LengthSq, Length, Select);
- Normal = XMVectorSelect(LengthSq, Normal, Select);
- XMVECTOR ControlMax = XMVectorGreater(Length, LengthMax);
- XMVECTOR ControlMin = XMVectorLess(Length, LengthMin);
- XMVECTOR ClampLength = XMVectorSelect(Length, LengthMax, ControlMax);
- ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin);
- XMVECTOR Result = XMVectorMultiply(Normal, ClampLength);
- // Preserve the original vector (with no precision loss) if the length falls within the given range
- XMVECTOR Control = XMVectorEqualInt(ControlMax, ControlMin);
- Result = XMVectorSelect(Result, V, Control);
- return Result;
- }
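- //------------------------------------------------------------------------------
- // Illustrative usage sketch (not part of the library; the XMExample* helper name is
- // hypothetical): clamp a 2D velocity's magnitude into [MinSpeed, MaxSpeed] while keeping
- // its direction, e.g. (3,4) clamped to a maximum of 2 becomes (1.2, 1.6) since |(3,4)| = 5.
- inline XMVECTOR XM_CALLCONV XMExampleClampSpeed2D
- (
- FXMVECTOR Velocity,
- float MinSpeed,
- float MaxSpeed
- )
- {
- return XMVector2ClampLength(Velocity, MinSpeed, MaxSpeed);
- }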
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2Reflect
- (
- FXMVECTOR Incident,
- FXMVECTOR Normal
- )
- {
- // Result = Incident - (2 * dot(Incident, Normal)) * Normal
- XMVECTOR Result;
- Result = XMVector2Dot(Incident, Normal);
- Result = XMVectorAdd(Result, Result);
- Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident);
- return Result;
- }
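- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): a simplified 2D
- // bounce that mirrors a velocity about a unit wall normal using the formula above and
- // then damps it by a restitution factor. E.g. (1,-1) against the floor normal (0,1)
- // reflects to (1,1).
- inline XMVECTOR XM_CALLCONV XMExampleBounce2D
- (
- FXMVECTOR Velocity,
- FXMVECTOR UnitWallNormal,
- float Restitution
- )
- {
- XMVECTOR Reflected = XMVector2Reflect(Velocity, UnitWallNormal);
- return XMVectorScale(Reflected, Restitution);
- }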
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2Refract
- (
- FXMVECTOR Incident,
- FXMVECTOR Normal,
- float RefractionIndex
- )
- {
- XMVECTOR Index = XMVectorReplicate(RefractionIndex);
- return XMVector2RefractV(Incident, Normal, Index);
- }
- //------------------------------------------------------------------------------
- // Return the refraction of a 2D vector
- inline XMVECTOR XM_CALLCONV XMVector2RefractV
- (
- FXMVECTOR Incident,
- FXMVECTOR Normal,
- FXMVECTOR RefractionIndex
- )
- {
- // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) +
- // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal))))
- #if defined(_XM_NO_INTRINSICS_)
- float IDotN = (Incident.vector4_f32[0]*Normal.vector4_f32[0])+(Incident.vector4_f32[1]*Normal.vector4_f32[1]);
- // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
- float RY = 1.0f-(IDotN*IDotN);
- float RX = 1.0f-(RY*RefractionIndex.vector4_f32[0]*RefractionIndex.vector4_f32[0]);
- RY = 1.0f-(RY*RefractionIndex.vector4_f32[1]*RefractionIndex.vector4_f32[1]);
- if (RX>=0.0f) {
- RX = (RefractionIndex.vector4_f32[0]*Incident.vector4_f32[0])-(Normal.vector4_f32[0]*((RefractionIndex.vector4_f32[0]*IDotN)+sqrtf(RX)));
- } else {
- RX = 0.0f;
- }
- if (RY>=0.0f) {
- RY = (RefractionIndex.vector4_f32[1]*Incident.vector4_f32[1])-(Normal.vector4_f32[1]*((RefractionIndex.vector4_f32[1]*IDotN)+sqrtf(RY)));
- } else {
- RY = 0.0f;
- }
- XMVECTOR vResult;
- vResult.vector4_f32[0] = RX;
- vResult.vector4_f32[1] = RY;
- vResult.vector4_f32[2] = 0.0f;
- vResult.vector4_f32[3] = 0.0f;
- return vResult;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t IL = vget_low_f32( Incident );
- float32x2_t NL = vget_low_f32( Normal );
- float32x2_t RIL = vget_low_f32( RefractionIndex );
- // Get the 2D Dot product of Incident-Normal
- float32x2_t vTemp = vmul_f32(IL, NL);
- float32x2_t IDotN = vpadd_f32( vTemp, vTemp );
- // vTemp = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
- vTemp = vmls_f32( vget_low_f32( g_XMOne ), IDotN, IDotN);
- vTemp = vmul_f32(vTemp,RIL);
- vTemp = vmls_f32(vget_low_f32( g_XMOne ), vTemp, RIL );
- // A term <= 0 means total internal reflection; punt that lane to zero rather than take sqrt() of a negative
- uint32x2_t vMask = vcgt_f32(vTemp, vget_low_f32(g_XMZero) );
- // Sqrt(vTemp)
- float32x2_t S0 = vrsqrte_f32(vTemp);
- float32x2_t P0 = vmul_f32( vTemp, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( vTemp, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- float32x2_t S2 = vmul_f32( S1, R1 );
- vTemp = vmul_f32( vTemp, S2 );
- // R = RefractionIndex * IDotN + sqrt(R)
- vTemp = vmla_f32( vTemp, RIL, IDotN );
- // Result = RefractionIndex * Incident - Normal * R
- float32x2_t vResult = vmul_f32(RIL,IL);
- vResult = vmls_f32( vResult, vTemp, NL );
- vResult = vand_u32(vResult,vMask);
- return vcombine_f32(vResult, vResult);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) +
- // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal))))
- // Get the 2D Dot product of Incident-Normal
- XMVECTOR IDotN = XMVector2Dot(Incident, Normal);
- // vTemp = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
- XMVECTOR vTemp = _mm_mul_ps(IDotN,IDotN);
- vTemp = _mm_sub_ps(g_XMOne,vTemp);
- vTemp = _mm_mul_ps(vTemp,RefractionIndex);
- vTemp = _mm_mul_ps(vTemp,RefractionIndex);
- vTemp = _mm_sub_ps(g_XMOne,vTemp);
- // A term <= 0 means total internal reflection; punt that lane to zero rather than take sqrt() of a negative
- XMVECTOR vMask = _mm_cmpgt_ps(vTemp,g_XMZero);
- // R = RefractionIndex * IDotN + sqrt(R)
- vTemp = _mm_sqrt_ps(vTemp);
- XMVECTOR vResult = _mm_mul_ps(RefractionIndex,IDotN);
- vTemp = _mm_add_ps(vTemp,vResult);
- // Result = RefractionIndex * Incident - Normal * R
- vResult = _mm_mul_ps(RefractionIndex,Incident);
- vTemp = _mm_mul_ps(vTemp,Normal);
- vResult = _mm_sub_ps(vResult,vTemp);
- vResult = _mm_and_ps(vResult,vMask);
- return vResult;
- #endif
- }
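- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): refraction of a
- // unit direction crossing from glass (index ~1.5) back into air (index ~1.0), assuming
- // the RefractionIndex argument is the ratio n1/n2 as in HLSL refract. A zero result
- // indicates total internal reflection (beyond roughly 41.8 degrees for this ratio).
- inline XMVECTOR XM_CALLCONV XMExampleGlassToAir2D
- (
- FXMVECTOR UnitIncident,
- FXMVECTOR UnitNormal
- )
- {
- const float eta = 1.5f / 1.0f;
- return XMVector2Refract(UnitIncident, UnitNormal, eta);
- }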
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2Orthogonal
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- -V.vector4_f32[1],
- V.vector4_f32[0],
- 0.f,
- 0.f
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- static const XMVECTORF32 Negate = { { { -1.f, 1.f, 0, 0 } } };
- const float32x2_t zero = vdup_n_f32(0);
- float32x2_t VL = vget_low_f32( V );
- float32x2_t Result = vmul_f32( vrev64_f32( VL ), vget_low_f32( Negate ) );
- return vcombine_f32( Result, zero );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,2,0,1));
- vResult = _mm_mul_ps(vResult,g_XMNegateX);
- return vResult;
- #endif
- }
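- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): a unit normal for
- // the edge P0->P1, built from the (-y, x) perpendicular produced above.
- inline XMVECTOR XM_CALLCONV XMExampleEdgeNormal2D
- (
- FXMVECTOR P0,
- FXMVECTOR P1
- )
- {
- XMVECTOR Edge = XMVectorSubtract(P1, P0);
- return XMVector2Normalize(XMVector2Orthogonal(Edge));
- }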
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormalsEst
- (
- FXMVECTOR N1,
- FXMVECTOR N2
- )
- {
- XMVECTOR Result = XMVector2Dot(N1, N2);
- Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
- Result = XMVectorACosEst(Result);
- return Result;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormals
- (
- FXMVECTOR N1,
- FXMVECTOR N2
- )
- {
- XMVECTOR Result = XMVector2Dot(N1, N2);
- Result = XMVectorClamp(Result, g_XMNegativeOne, g_XMOne);
- Result = XMVectorACos(Result);
- return Result;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2AngleBetweenVectors
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- XMVECTOR L1 = XMVector2ReciprocalLength(V1);
- XMVECTOR L2 = XMVector2ReciprocalLength(V2);
- XMVECTOR Dot = XMVector2Dot(V1, V2);
- L1 = XMVectorMultiply(L1, L2);
- XMVECTOR CosAngle = XMVectorMultiply(Dot, L1);
- CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v);
- return XMVectorACos(CosAngle);
- }
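- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): the scalar angle,
- // in degrees, between two non-zero 2D vectors; e.g. (1,0) and (0,1) give 90 degrees.
- inline float XM_CALLCONV XMExampleAngleDegrees2D
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- XMVECTOR Radians = XMVector2AngleBetweenVectors(V1, V2);
- return XMConvertToDegrees(XMVectorGetX(Radians));
- }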
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2LinePointDistance
- (
- FXMVECTOR LinePoint1,
- FXMVECTOR LinePoint2,
- FXMVECTOR Point
- )
- {
- // Given a vector PointVector from LinePoint1 to Point and a vector
- // LineVector from LinePoint1 to LinePoint2, the scaled distance
- // PointProjectionScale from LinePoint1 to the perpendicular projection
- // of PointVector onto the line is defined as:
- //
- // PointProjectionScale = dot(PointVector, LineVector) / LengthSq(LineVector)
- XMVECTOR PointVector = XMVectorSubtract(Point, LinePoint1);
- XMVECTOR LineVector = XMVectorSubtract(LinePoint2, LinePoint1);
- XMVECTOR LengthSq = XMVector2LengthSq(LineVector);
- XMVECTOR PointProjectionScale = XMVector2Dot(PointVector, LineVector);
- PointProjectionScale = XMVectorDivide(PointProjectionScale, LengthSq);
- XMVECTOR DistanceVector = XMVectorMultiply(LineVector, PointProjectionScale);
- DistanceVector = XMVectorSubtract(PointVector, DistanceVector);
- return XMVector2Length(DistanceVector);
- }
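- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): the scalar distance
- // from a point to the infinite line through (0,0) and (1,0), i.e. the x-axis; a point
- // (x, y) returns |y|, which follows directly from the projection described above.
- inline float XM_CALLCONV XMExampleDistanceToXAxis2D
- (
- FXMVECTOR Point
- )
- {
- XMVECTOR LinePoint1 = XMVectorZero();
- XMVECTOR LinePoint2 = XMVectorSet(1.0f, 0.0f, 0.0f, 0.0f);
- return XMVectorGetX(XMVector2LinePointDistance(LinePoint1, LinePoint2, Point));
- }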
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2IntersectLine
- (
- FXMVECTOR Line1Point1,
- FXMVECTOR Line1Point2,
- FXMVECTOR Line2Point1,
- GXMVECTOR Line2Point2
- )
- {
- #if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR V1 = XMVectorSubtract(Line1Point2, Line1Point1);
- XMVECTOR V2 = XMVectorSubtract(Line2Point2, Line2Point1);
- XMVECTOR V3 = XMVectorSubtract(Line1Point1, Line2Point1);
- XMVECTOR C1 = XMVector2Cross(V1, V2);
- XMVECTOR C2 = XMVector2Cross(V2, V3);
- XMVECTOR Result;
- const XMVECTOR Zero = XMVectorZero();
- if (XMVector2NearEqual(C1, Zero, g_XMEpsilon.v))
- {
- if (XMVector2NearEqual(C2, Zero, g_XMEpsilon.v))
- {
- // Coincident
- Result = g_XMInfinity.v;
- }
- else
- {
- // Parallel
- Result = g_XMQNaN.v;
- }
- }
- else
- {
- // Intersection point = Line1Point1 + V1 * (C2 / C1)
- XMVECTOR Scale = XMVectorReciprocal(C1);
- Scale = XMVectorMultiply(C2, Scale);
- Result = XMVectorMultiplyAdd(V1, Scale, Line1Point1);
- }
- return Result;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR V1 = _mm_sub_ps(Line1Point2, Line1Point1);
- XMVECTOR V2 = _mm_sub_ps(Line2Point2, Line2Point1);
- XMVECTOR V3 = _mm_sub_ps(Line1Point1, Line2Point1);
- // Generate the cross products
- XMVECTOR C1 = XMVector2Cross(V1, V2);
- XMVECTOR C2 = XMVector2Cross(V2, V3);
- // If |C1| is greater than epsilon (the lines are not parallel), use the calculated value
- XMVECTOR vResultMask = _mm_setzero_ps();
- vResultMask = _mm_sub_ps(vResultMask,C1);
- vResultMask = _mm_max_ps(vResultMask,C1);
- // 0xFFFFFFFF if the calculated value is to be used
- vResultMask = _mm_cmpgt_ps(vResultMask,g_XMEpsilon);
- // If |C1| is within epsilon of zero, pick the failure value: INFINITY (coincident) or NAN (parallel)
- XMVECTOR vFailMask = _mm_setzero_ps();
- vFailMask = _mm_sub_ps(vFailMask,C2);
- vFailMask = _mm_max_ps(vFailMask,C2);
- vFailMask = _mm_cmple_ps(vFailMask,g_XMEpsilon);
- XMVECTOR vFail = _mm_and_ps(vFailMask,g_XMInfinity);
- vFailMask = _mm_andnot_ps(vFailMask,g_XMQNaN);
- // vFail is NAN or INF
- vFail = _mm_or_ps(vFail,vFailMask);
- // Intersection point = Line1Point1 + V1 * (C2 / C1)
- XMVECTOR vResult = _mm_div_ps(C2,C1);
- vResult = _mm_mul_ps(vResult,V1);
- vResult = _mm_add_ps(vResult,Line1Point1);
- // Use result, or failure value
- vResult = _mm_and_ps(vResult,vResultMask);
- vResultMask = _mm_andnot_ps(vResultMask,vFail);
- vResult = _mm_or_ps(vResult,vResultMask);
- return vResult;
- #endif
- }
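- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): classifying the
- // result above. A finite point means the lines cross, INFINITY means they are coincident,
- // and QNAN means they are parallel but distinct.
- inline bool XM_CALLCONV XMExampleLinesIntersectAtPoint
- (
- FXMVECTOR Line1Point1,
- FXMVECTOR Line1Point2,
- FXMVECTOR Line2Point1,
- GXMVECTOR Line2Point2,
- XMFLOAT2* pIntersection
- )
- {
- XMVECTOR P = XMVector2IntersectLine(Line1Point1, Line1Point2, Line2Point1, Line2Point2);
- if (XMVector2IsNaN(P) || XMVector2IsInfinite(P))
- {
- return false;
- }
- XMStoreFloat2(pIntersection, P);
- return true;
- }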
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2Transform
- (
- FXMVECTOR V,
- FXMMATRIX M
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiplyAdd(Y, M.r[1], M.r[3]);
- Result = XMVectorMultiplyAdd(X, M.r[0], Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32( V );
- float32x4_t Result = vmlaq_lane_f32( M.r[3], M.r[1], VL, 1 ); // Y
- return vmlaq_lane_f32( Result, M.r[0], VL, 0 ); // X
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- vResult = _mm_mul_ps(vResult,M.r[0]);
- XMVECTOR vTemp = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- vTemp = _mm_mul_ps(vTemp,M.r[1]);
- vResult = _mm_add_ps(vResult,vTemp);
- vResult = _mm_add_ps(vResult,M.r[3]);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline XMFLOAT4* XM_CALLCONV XMVector2TransformStream
- (
- XMFLOAT4* pOutputStream,
- size_t OutputStride,
- const XMFLOAT2* pInputStream,
- size_t InputStride,
- size_t VectorCount,
- FXMMATRIX M
- )
- {
- assert(pOutputStream != nullptr);
- assert(pInputStream != nullptr);
- assert(InputStride >= sizeof(XMFLOAT2));
- _Analysis_assume_(InputStride >= sizeof(XMFLOAT2));
- assert(OutputStride >= sizeof(XMFLOAT4));
- _Analysis_assume_(OutputStride >= sizeof(XMFLOAT4));
- #if defined(_XM_NO_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row3 = M.r[3];
- for (size_t i = 0; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat2((const XMFLOAT2*)pInputVector);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiplyAdd(Y, row1, row3);
- Result = XMVectorMultiplyAdd(X, row0, Result);
- #ifdef _PREFAST_
- #pragma prefast(push)
- #pragma prefast(disable : 26015, "PREfast noise: Esp:1307" )
- #endif
- XMStoreFloat4((XMFLOAT4*)pOutputVector, Result);
- #ifdef _PREFAST_
- #pragma prefast(pop)
- #endif
- pInputVector += InputStride;
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row3 = M.r[3];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if ((InputStride == sizeof(XMFLOAT2)) && (OutputStride == sizeof(XMFLOAT4)))
- {
- for (size_t j = 0; j < four; ++j)
- {
- float32x4x2_t V = vld2q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*4;
- float32x2_t r3 = vget_low_f32( row3 );
- float32x2_t r = vget_low_f32( row0 );
- XMVECTOR vResult0 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), V.val[0], r, 0 ); // Ax+M
- XMVECTOR vResult1 = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Bx+N
- __prefetch( pInputVector );
- r3 = vget_high_f32( row3 );
- r = vget_high_f32( row0 );
- XMVECTOR vResult2 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), V.val[0], r, 0 ); // Cx+O
- XMVECTOR vResult3 = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Dx+P
-
- __prefetch( pInputVector+XM_CACHE_LINE_SIZE );
- r = vget_low_f32( row1 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[1], r, 0 ); // Ax+Ey+M
- vResult1 = vmlaq_lane_f32( vResult1, V.val[1], r, 1 ); // Bx+Fy+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*2) );
- r = vget_high_f32( row1 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[1], r, 0 ); // Cx+Gy+O
- vResult3 = vmlaq_lane_f32( vResult3, V.val[1], r, 1 ); // Dx+Hy+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*3) );
- float32x4x4_t R;
- R.val[0] = vResult0;
- R.val[1] = vResult1;
- R.val[2] = vResult2;
- R.val[3] = vResult3;
- vst4q_f32( reinterpret_cast<float*>(pOutputVector), R );
- pOutputVector += sizeof(XMFLOAT4)*4;
- i += 4;
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- float32x2_t V = vld1_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += InputStride;
- XMVECTOR vResult = vmlaq_lane_f32( row3, row0, V, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, row1, V, 1 ); // Y
- vst1q_f32( reinterpret_cast<float*>(pOutputVector), vResult );
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_SSE_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row3 = M.r[3];
- size_t i = 0;
- size_t two = VectorCount >> 1;
- if ( two > 0 )
- {
- if ( InputStride == sizeof(XMFLOAT2) )
- {
- if ( !((uintptr_t)pOutputStream & 0xF) && !(OutputStride & 0xF) )
- {
- // Packed input, aligned output
- for (size_t j = 0; j < two; ++j)
- {
- XMVECTOR V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*2;
- XMVECTOR Y = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- XMVECTOR X = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- Y = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- X = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- vTemp = _mm_mul_ps( Y, row1 );
- vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- i += 2;
- }
- }
- else
- {
- // Packed input, unaligned output
- for (size_t j = 0; j < two; ++j)
- {
- XMVECTOR V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*2;
- XMVECTOR Y = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- XMVECTOR X = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- Y = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- X = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- vTemp = _mm_mul_ps( Y, row1 );
- vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- i += 2;
- }
- }
- }
- }
- if ( !((uintptr_t)pInputVector & 0xF) && !(InputStride & 0xF) )
- {
- if ( !((uintptr_t)pOutputStream & 0xF) && !(OutputStride & 0xF) )
- {
- // Aligned input, aligned output
- for (; i < VectorCount; i++)
- {
- XMVECTOR V = _mm_castsi128_ps( _mm_loadl_epi64( reinterpret_cast<const __m128i*>(pInputVector) ) );
- pInputVector += InputStride;
- XMVECTOR Y = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- XMVECTOR X = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- }
- }
- else
- {
- // Aligned input, unaligned output
- for (; i < VectorCount; i++)
- {
- XMVECTOR V = _mm_castsi128_ps( _mm_loadl_epi64( reinterpret_cast<const __m128i*>(pInputVector) ) );
- pInputVector += InputStride;
- XMVECTOR Y = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- XMVECTOR X = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- }
- }
- }
- else
- {
- // Unaligned input
- for (; i < VectorCount; i++)
- {
- __m128 x = _mm_load_ss( reinterpret_cast<const float*>(pInputVector) );
- __m128 y = _mm_load_ss( reinterpret_cast<const float*>(pInputVector+4) );
- pInputVector += InputStride;
- XMVECTOR Y = XM_PERMUTE_PS(y,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR X = XM_PERMUTE_PS(x,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- }
- }
- XM_SFENCE();
- return pOutputStream;
- #endif
- }
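- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper and buffers, not part of the library):
- // transform a tightly packed array of XMFLOAT2 positions into XMFLOAT4 homogeneous
- // results in one call; because both arrays are contiguous, the strides are simply the
- // element sizes.
- inline void XM_CALLCONV XMExampleTransformPacked2D
- (
- XMFLOAT4* pResults,
- const XMFLOAT2* pPoints,
- size_t Count,
- FXMMATRIX M
- )
- {
- XMVector2TransformStream(pResults, sizeof(XMFLOAT4), pPoints, sizeof(XMFLOAT2), Count, M);
- }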
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2TransformCoord
- (
- FXMVECTOR V,
- FXMMATRIX M
- )
- {
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiplyAdd(Y, M.r[1], M.r[3]);
- Result = XMVectorMultiplyAdd(X, M.r[0], Result);
- XMVECTOR W = XMVectorSplatW(Result);
- return XMVectorDivide( Result, W );
- }
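- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): translate a 2D
- // point. TransformCoord treats the input as a point (w = 1), so the translation row
- // r[3] is applied and the result is divided through by w.
- inline XMVECTOR XM_CALLCONV XMExampleTranslatePoint2D
- (
- FXMVECTOR Point,
- float OffsetX,
- float OffsetY
- )
- {
- XMMATRIX M = XMMatrixTranslation(OffsetX, OffsetY, 0.0f);
- return XMVector2TransformCoord(Point, M);
- }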
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline XMFLOAT2* XM_CALLCONV XMVector2TransformCoordStream
- (
- XMFLOAT2* pOutputStream,
- size_t OutputStride,
- const XMFLOAT2* pInputStream,
- size_t InputStride,
- size_t VectorCount,
- FXMMATRIX M
- )
- {
- assert(pOutputStream != nullptr);
- assert(pInputStream != nullptr);
- assert(InputStride >= sizeof(XMFLOAT2));
- _Analysis_assume_(InputStride >= sizeof(XMFLOAT2));
- assert(OutputStride >= sizeof(XMFLOAT2));
- _Analysis_assume_(OutputStride >= sizeof(XMFLOAT2));
- #if defined(_XM_NO_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row3 = M.r[3];
- for (size_t i = 0; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat2((const XMFLOAT2*)pInputVector);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiplyAdd(Y, row1, row3);
- Result = XMVectorMultiplyAdd(X, row0, Result);
- XMVECTOR W = XMVectorSplatW(Result);
- Result = XMVectorDivide(Result, W);
- #ifdef _PREFAST_
- #pragma prefast(push)
- #pragma prefast(disable : 26015, "PREfast noise: Esp:1307" )
- #endif
- XMStoreFloat2((XMFLOAT2*)pOutputVector, Result);
- #ifdef _PREFAST_
- #pragma prefast(pop)
- #endif
- pInputVector += InputStride;
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row3 = M.r[3];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if ((InputStride == sizeof(XMFLOAT2)) && (OutputStride == sizeof(XMFLOAT2)))
- {
- for (size_t j = 0; j < four; ++j)
- {
- float32x4x2_t V = vld2q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*4;
- float32x2_t r3 = vget_low_f32( row3 );
- float32x2_t r = vget_low_f32( row0 );
- XMVECTOR vResult0 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), V.val[0], r, 0 ); // Ax+M
- XMVECTOR vResult1 = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Bx+N
- __prefetch( pInputVector );
- r3 = vget_high_f32( row3 );
- r = vget_high_f32( row0 );
- XMVECTOR W = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Dx+P
-
- __prefetch( pInputVector+XM_CACHE_LINE_SIZE );
- r = vget_low_f32( row1 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[1], r, 0 ); // Ax+Ey+M
- vResult1 = vmlaq_lane_f32( vResult1, V.val[1], r, 1 ); // Bx+Fy+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*2) );
- r = vget_high_f32( row1 );
- W = vmlaq_lane_f32( W, V.val[1], r, 1 ); // Dx+Hy+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*3) );
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- V.val[0] = vdivq_f32( vResult0, W );
- V.val[1] = vdivq_f32( vResult1, W );
- #else
- // 2 iterations of Newton-Raphson refinement of reciprocal
- float32x4_t Reciprocal = vrecpeq_f32(W);
- float32x4_t S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
-
- V.val[0] = vmulq_f32( vResult0, Reciprocal );
- V.val[1] = vmulq_f32( vResult1, Reciprocal );
- #endif
- vst2q_f32( reinterpret_cast<float*>(pOutputVector),V );
- pOutputVector += sizeof(XMFLOAT2)*4;
- i += 4;
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- float32x2_t V = vld1_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += InputStride;
- XMVECTOR vResult = vmlaq_lane_f32( row3, row0, V, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, row1, V, 1 ); // Y
- V = vget_high_f32( vResult );
- float32x2_t W = vdup_lane_f32( V, 1 );
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- V = vget_low_f32( vResult );
- V = vdiv_f32( V, W );
- #else
- // 2 iterations of Newton-Raphson refinement of reciprocal for W
- float32x2_t Reciprocal = vrecpe_f32( W );
- float32x2_t S = vrecps_f32( Reciprocal, W );
- Reciprocal = vmul_f32( S, Reciprocal );
- S = vrecps_f32( Reciprocal, W );
- Reciprocal = vmul_f32( S, Reciprocal );
- V = vget_low_f32( vResult );
- V = vmul_f32( V, Reciprocal );
- #endif
- vst1_f32( reinterpret_cast<float*>(pOutputVector), V );
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_SSE_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row3 = M.r[3];
- size_t i = 0;
- size_t two = VectorCount >> 1;
- if ( two > 0 )
- {
- if ( InputStride == sizeof(XMFLOAT2) )
- {
- if ( OutputStride == sizeof(XMFLOAT2) )
- {
- if ( !((uintptr_t)pOutputStream & 0xF) )
- {
- // Packed input, aligned & packed output
- for (size_t j = 0; j < two; ++j)
- {
- XMVECTOR V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*2;
- // Result 1
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- XMVECTOR V1 = _mm_div_ps( vTemp, W );
- // Result 2
- Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(3, 3, 3, 3) );
- X = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- vTemp = _mm_mul_ps( Y, row1 );
- vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- XMVECTOR V2 = _mm_div_ps( vTemp, W );
- vTemp = _mm_movelh_ps( V1, V2 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += sizeof(XMFLOAT2)*2;
- i += 2;
- }
- }
- else
- {
- // Packed input, unaligned & packed output
- for (size_t j = 0; j < two; ++j)
- {
- XMVECTOR V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*2;
- // Result 1
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- XMVECTOR V1 = _mm_div_ps( vTemp, W );
- // Result 2
- Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(3, 3, 3, 3) );
- X = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- vTemp = _mm_mul_ps( Y, row1 );
- vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- XMVECTOR V2 = _mm_div_ps( vTemp, W );
- vTemp = _mm_movelh_ps( V1, V2 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += sizeof(XMFLOAT2)*2;
- i += 2;
- }
- }
- }
- else
- {
- // Packed input, unpacked output
- for (size_t j = 0; j < two; ++j)
- {
- XMVECTOR V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*2;
- // Result 1
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp2 = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(1, 1, 1, 1) );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector), vTemp );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector+4), vTemp2 );
- pOutputVector += OutputStride;
- // Result 2
- Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(3, 3, 3, 3) );
- X = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- vTemp = _mm_mul_ps( Y, row1 );
- vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp2 = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(1, 1, 1, 1) );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector), vTemp );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector+4), vTemp2 );
- pOutputVector += OutputStride;
- i += 2;
- }
- }
- }
- }
- if ( !((uintptr_t)pInputVector & 0xF) && !(InputStride & 0xF) )
- {
- // Aligned input
- for (; i < VectorCount; i++)
- {
- XMVECTOR V = _mm_castsi128_ps( _mm_loadl_epi64( reinterpret_cast<const __m128i*>(pInputVector) ) );
- pInputVector += InputStride;
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp2 = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(1, 1, 1, 1) );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector), vTemp );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector+4), vTemp2 );
- pOutputVector += OutputStride;
- }
- }
- else
- {
- // Unaligned input
- for (; i < VectorCount; i++)
- {
- __m128 x = _mm_load_ss( reinterpret_cast<const float*>(pInputVector) );
- __m128 y = _mm_load_ss( reinterpret_cast<const float*>(pInputVector+4) );
- pInputVector += InputStride;
- XMVECTOR Y = XM_PERMUTE_PS( y, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR X = XM_PERMUTE_PS( x, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp2 = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(1, 1, 1, 1) );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector), vTemp );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector+4), vTemp2 );
- pOutputVector += OutputStride;
- }
- }
- XM_SFENCE();
- return pOutputStream;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector2TransformNormal
- (
- FXMVECTOR V,
- FXMMATRIX M
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiply(Y, M.r[1]);
- Result = XMVectorMultiplyAdd(X, M.r[0], Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32( V );
- float32x4_t Result = vmulq_lane_f32( M.r[1], VL, 1 ); // Y
- return vmlaq_lane_f32( Result, M.r[0], VL, 0 ); // X
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- vResult = _mm_mul_ps(vResult,M.r[0]);
- XMVECTOR vTemp = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- vTemp = _mm_mul_ps(vTemp,M.r[1]);
- vResult = _mm_add_ps(vResult,vTemp);
- return vResult;
- #endif
- }
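- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): rotate a 2D
- // direction. TransformNormal uses only rows r[0] and r[1], so any translation in the
- // matrix is ignored, unlike TransformCoord above.
- inline XMVECTOR XM_CALLCONV XMExampleRotateDirection2D
- (
- FXMVECTOR Direction,
- float Radians
- )
- {
- XMMATRIX M = XMMatrixRotationZ(Radians);
- return XMVector2TransformNormal(Direction, M);
- }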
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline XMFLOAT2* XM_CALLCONV XMVector2TransformNormalStream
- (
- XMFLOAT2* pOutputStream,
- size_t OutputStride,
- const XMFLOAT2* pInputStream,
- size_t InputStride,
- size_t VectorCount,
- FXMMATRIX M
- )
- {
- assert(pOutputStream != nullptr);
- assert(pInputStream != nullptr);
- assert(InputStride >= sizeof(XMFLOAT2));
- _Analysis_assume_(InputStride >= sizeof(XMFLOAT2));
- assert(OutputStride >= sizeof(XMFLOAT2));
- _Analysis_assume_(OutputStride >= sizeof(XMFLOAT2));
- #if defined(_XM_NO_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- for (size_t i = 0; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat2((const XMFLOAT2*)pInputVector);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiply(Y, row1);
- Result = XMVectorMultiplyAdd(X, row0, Result);
- #ifdef _PREFAST_
- #pragma prefast(push)
- #pragma prefast(disable : 26015, "PREfast noise: Esp:1307" )
- #endif
- XMStoreFloat2((XMFLOAT2*)pOutputVector, Result);
- #ifdef _PREFAST_
- #pragma prefast(pop)
- #endif
- pInputVector += InputStride;
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if ((InputStride == sizeof(XMFLOAT2)) && (OutputStride == sizeof(XMFLOAT2)))
- {
- for (size_t j = 0; j < four; ++j)
- {
- float32x4x2_t V = vld2q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*4;
- float32x2_t r = vget_low_f32( row0 );
- XMVECTOR vResult0 = vmulq_lane_f32( V.val[0], r, 0 ); // Ax
- XMVECTOR vResult1 = vmulq_lane_f32( V.val[0], r, 1 ); // Bx
- __prefetch( pInputVector );
- __prefetch( pInputVector+XM_CACHE_LINE_SIZE );
- r = vget_low_f32( row1 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[1], r, 0 ); // Ax+Ey
- vResult1 = vmlaq_lane_f32( vResult1, V.val[1], r, 1 ); // Bx+Fy
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*2) );
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*3) );
- V.val[0] = vResult0;
- V.val[1] = vResult1;
- vst2q_f32( reinterpret_cast<float*>(pOutputVector), V );
- pOutputVector += sizeof(XMFLOAT2)*4;
- i += 4;
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- float32x2_t V = vld1_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += InputStride;
- XMVECTOR vResult = vmulq_lane_f32( row0, V, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, row1, V, 1 ); // Y
- V = vget_low_f32( vResult );
- vst1_f32( reinterpret_cast<float*>(pOutputVector), V );
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_SSE_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- size_t i = 0;
- size_t two = VectorCount >> 1;
- if ( two > 0 )
- {
- if ( InputStride == sizeof(XMFLOAT2) )
- {
- if ( OutputStride == sizeof(XMFLOAT2) )
- {
- if ( !((uintptr_t)pOutputStream & 0xF) )
- {
- // Packed input, aligned & packed output
- for (size_t j = 0; j < two; ++j)
- {
- XMVECTOR V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*2;
- // Result 1
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- XMVECTOR V1 = _mm_add_ps( vTemp, vTemp2 );
- // Result 2
- Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(3, 3, 3, 3) );
- X = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- vTemp = _mm_mul_ps( Y, row1 );
- vTemp2 = _mm_mul_ps( X, row0 );
- XMVECTOR V2 = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_movelh_ps( V1, V2 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += sizeof(XMFLOAT2)*2;
- i += 2;
- }
- }
- else
- {
- // Packed input, unaligned & packed output
- for (size_t j = 0; j < two; ++j)
- {
- XMVECTOR V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*2;
- // Result 1
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- XMVECTOR V1 = _mm_add_ps( vTemp, vTemp2 );
- // Result 2
- Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(3, 3, 3, 3) );
- X = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- vTemp = _mm_mul_ps( Y, row1 );
- vTemp2 = _mm_mul_ps( X, row0 );
- XMVECTOR V2 = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_movelh_ps( V1, V2 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += sizeof(XMFLOAT2)*2;
- i += 2;
- }
- }
- }
- else
- {
- // Packed input, unpacked output
- for (size_t j = 0; j < two; ++j)
- {
- XMVECTOR V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT2)*2;
- // Result 1
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp2 = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(1, 1, 1, 1) );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector), vTemp );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector+4), vTemp2 );
- pOutputVector += OutputStride;
- // Result 2
- Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(3, 3, 3, 3) );
- X = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- vTemp = _mm_mul_ps( Y, row1 );
- vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp2 = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(1, 1, 1, 1) );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector), vTemp );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector+4), vTemp2 );
- pOutputVector += OutputStride;
- i += 2;
- }
- }
- }
- }
- if ( !((uintptr_t)pInputVector & 0xF) && !(InputStride & 0xF) )
- {
- // Aligned input
- for (; i < VectorCount; i++)
- {
- XMVECTOR V = _mm_castsi128_ps( _mm_loadl_epi64( reinterpret_cast<const __m128i*>(pInputVector) ) );
- pInputVector += InputStride;
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp2 = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(1, 1, 1, 1) );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector), vTemp );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector+4), vTemp2 );
- pOutputVector += OutputStride;
- }
- }
- else
- {
- // Unaligned input
- for (; i < VectorCount; i++)
- {
- __m128 x = _mm_load_ss( reinterpret_cast<const float*>(pInputVector) );
- __m128 y = _mm_load_ss( reinterpret_cast<const float*>(pInputVector+4) );
- pInputVector += InputStride;
- XMVECTOR Y = XM_PERMUTE_PS( y, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR X = XM_PERMUTE_PS( x, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp2 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp2 = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(1, 1, 1, 1) );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector), vTemp );
- _mm_store_ss( reinterpret_cast<float*>(pOutputVector+4), vTemp2 );
- pOutputVector += OutputStride;
- }
- }
- XM_SFENCE();
- return pOutputStream;
- #endif
- }
- /****************************************************************************
- *
- * 3D Vector
- *
- ****************************************************************************/
- //------------------------------------------------------------------------------
- // Comparison operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3Equal
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1]) && (V1.vector4_f32[2] == V2.vector4_f32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpeq_ps(V1,V2);
- return (((_mm_movemask_ps(vTemp)&7)==7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector3EqualR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_f32[0] == V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] == V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] == V2.vector4_f32[2]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] != V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] != V2.vector4_f32[2]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU;
- uint32_t CR = 0;
- if ( r == 0xFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpeq_ps(V1,V2);
- int iTest = _mm_movemask_ps(vTemp)&7;
- uint32_t CR = 0;
- if (iTest==7)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
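- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical wrapper, not part of the library): the comparison
- // record returned above is intended for the XMComparisonAllTrue / XMComparisonAnyTrue
- // family, e.g. testing whether all three components matched.
- inline bool XM_CALLCONV XMExampleAllEqual3
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- return XMComparisonAllTrue(XMVector3EqualR(V1, V2));
- }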
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3EqualInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1]) && (V1.vector4_u32[2] == V2.vector4_u32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_u32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1),_mm_castps_si128(V2));
- return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp))&7)==7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector3EqualIntR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_u32[0] == V2.vector4_u32[0]) &&
- (V1.vector4_u32[1] == V2.vector4_u32[1]) &&
- (V1.vector4_u32[2] == V2.vector4_u32[2]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_u32[0] != V2.vector4_u32[0]) &&
- (V1.vector4_u32[1] != V2.vector4_u32[1]) &&
- (V1.vector4_u32[2] != V2.vector4_u32[2]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_u32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU;
- uint32_t CR = 0;
- if ( r == 0xFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1),_mm_castps_si128(V2));
- int iTemp = _mm_movemask_ps(_mm_castsi128_ps(vTemp))&7;
- uint32_t CR = 0;
- if (iTemp==7)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTemp)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3NearEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- FXMVECTOR Epsilon
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float dx, dy, dz;
- dx = fabsf(V1.vector4_f32[0]-V2.vector4_f32[0]);
- dy = fabsf(V1.vector4_f32[1]-V2.vector4_f32[1]);
- dz = fabsf(V1.vector4_f32[2]-V2.vector4_f32[2]);
- return (((dx <= Epsilon.vector4_f32[0]) &&
- (dy <= Epsilon.vector4_f32[1]) &&
- (dz <= Epsilon.vector4_f32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x4_t vDelta = vsubq_f32( V1, V2 );
- uint32x4_t vResult = vacleq_f32( vDelta, Epsilon );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Get the difference
- XMVECTOR vDelta = _mm_sub_ps(V1,V2);
- // Get the absolute value of the difference
- XMVECTOR vTemp = _mm_setzero_ps();
- vTemp = _mm_sub_ps(vTemp,vDelta);
- vTemp = _mm_max_ps(vTemp,vDelta);
- vTemp = _mm_cmple_ps(vTemp,Epsilon);
- // w is don't care
- return (((_mm_movemask_ps(vTemp)&7)==0x7) != 0);
- #endif
- }
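- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): an approximate
- // 3D equality test driven by a single scalar tolerance replicated across x, y and z.
- inline bool XM_CALLCONV XMExampleNearlyEqual3
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- float Tolerance
- )
- {
- return XMVector3NearEqual(V1, V2, XMVectorReplicate(Tolerance));
- }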
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3NotEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1]) || (V1.vector4_f32[2] != V2.vector4_f32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) != 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpeq_ps(V1,V2);
- return (((_mm_movemask_ps(vTemp)&7)!=7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3NotEqualInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1]) || (V1.vector4_u32[2] != V2.vector4_u32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_u32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) != 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1),_mm_castps_si128(V2));
- return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp))&7)!=7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3Greater
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1]) && (V1.vector4_f32[2] > V2.vector4_f32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgtq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpgt_ps(V1,V2);
- return (((_mm_movemask_ps(vTemp)&7)==7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector3GreaterR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_f32[0] > V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] > V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] > V2.vector4_f32[2]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_f32[0] <= V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] <= V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] <= V2.vector4_f32[2]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgtq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU;
- uint32_t CR = 0;
- if ( r == 0xFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpgt_ps(V1,V2);
- uint32_t CR = 0;
- int iTest = _mm_movemask_ps(vTemp)&7;
- if (iTest==7)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3GreaterOrEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1]) && (V1.vector4_f32[2] >= V2.vector4_f32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgeq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpge_ps(V1,V2);
- return (((_mm_movemask_ps(vTemp)&7)==7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector3GreaterOrEqualR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] >= V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] >= V2.vector4_f32[2]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] < V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] < V2.vector4_f32[2]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgeq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU;
- uint32_t CR = 0;
- if ( r == 0xFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpge_ps(V1,V2);
- uint32_t CR = 0;
- int iTest = _mm_movemask_ps(vTemp)&7;
- if (iTest==7)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3Less
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1]) && (V1.vector4_f32[2] < V2.vector4_f32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcltq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmplt_ps(V1,V2);
- return (((_mm_movemask_ps(vTemp)&7)==7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3LessOrEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1]) && (V1.vector4_f32[2] <= V2.vector4_f32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcleq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmple_ps(V1,V2);
- return (((_mm_movemask_ps(vTemp)&7)==7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3InBounds
- (
- FXMVECTOR V,
- FXMVECTOR Bounds
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) &&
- (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) &&
- (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Test if less than or equal
- uint32x4_t ivTemp1 = vcleq_f32(V,Bounds);
- // Negate the bounds
- float32x4_t vTemp2 = vnegq_f32(Bounds);
- // Test if greater or equal (Reversed)
- uint32x4_t ivTemp2 = vcleq_f32(vTemp2,V);
- // Blend answers
- ivTemp1 = vandq_u32(ivTemp1,ivTemp2);
- // in bounds?
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(ivTemp1), vget_high_u8(ivTemp1));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Test if less than or equal
- XMVECTOR vTemp1 = _mm_cmple_ps(V,Bounds);
- // Negate the bounds
- XMVECTOR vTemp2 = _mm_mul_ps(Bounds,g_XMNegativeOne);
- // Test if greater or equal (Reversed)
- vTemp2 = _mm_cmple_ps(vTemp2,V);
- // Blend answers
- vTemp1 = _mm_and_ps(vTemp1,vTemp2);
- // x,y and z in bounds? (w is don't care)
- return (((_mm_movemask_ps(vTemp1)&0x7)==0x7) != 0);
- #else
- return XMComparisonAllInBounds(XMVector3InBoundsR(V, Bounds));
- #endif
- }
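- //------------------------------------------------------------------------------
- // Illustrative sketch (hypothetical helper, not part of the library): a point-in-box
- // test for an axis-aligned box given by its center and half-extents, using the symmetric
- // bounds test above on the point expressed relative to the center.
- inline bool XM_CALLCONV XMExamplePointInBox
- (
- FXMVECTOR Point,
- FXMVECTOR Center,
- FXMVECTOR HalfExtents
- )
- {
- return XMVector3InBounds(XMVectorSubtract(Point, Center), HalfExtents);
- }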
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3IsNaN
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (XMISNAN(V.vector4_f32[0]) ||
- XMISNAN(V.vector4_f32[1]) ||
- XMISNAN(V.vector4_f32[2]));
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Test against itself. NaN is always not equal
- uint32x4_t vTempNan = vceqq_f32( V, V );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempNan), vget_high_u8(vTempNan));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- // If x or y or z are NaN, the mask is zero
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) != 0xFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Test against itself. NaN is always not equal
- XMVECTOR vTempNan = _mm_cmpneq_ps(V,V);
- // If x or y or z are NaN, the mask is non-zero
- return ((_mm_movemask_ps(vTempNan)&7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector3IsInfinite
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (XMISINF(V.vector4_f32[0]) ||
- XMISINF(V.vector4_f32[1]) ||
- XMISINF(V.vector4_f32[2]));
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Mask off the sign bit
- uint32x4_t vTempInf = vandq_u32( V, g_XMAbsMask );
- // Compare to infinity
- vTempInf = vceqq_f32(vTempInf, g_XMInfinity );
- // If any lane is infinity, its comparison mask is all ones.
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempInf), vget_high_u8(vTempInf));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( (vget_lane_u32(vTemp.val[1], 1) & 0xFFFFFFU) != 0 );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Mask off the sign bit
- __m128 vTemp = _mm_and_ps(V,g_XMAbsMask);
- // Compare to infinity
- vTemp = _mm_cmpeq_ps(vTemp,g_XMInfinity);
- // If x, y or z is infinity, the corresponding sign bits of the mask are set.
- return ((_mm_movemask_ps(vTemp)&7) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- // Computation operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Dot
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float fValue = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1] + V1.vector4_f32[2] * V2.vector4_f32[2];
- XMVECTORF32 vResult;
- vResult.f[0] =
- vResult.f[1] =
- vResult.f[2] =
- vResult.f[3] = fValue;
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x4_t vTemp = vmulq_f32( V1, V2 );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vpadd_f32( v1, v1 );
- v2 = vdup_lane_f32( v2, 0 );
- v1 = vadd_f32( v1, v2 );
- return vcombine_f32( v1, v1 );
- #elif defined(_XM_SSE4_INTRINSICS_)
- return _mm_dp_ps( V1, V2, 0x7f );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vTemp = _mm_mul_ps(V1,V2);
- vTemp = _mm_and_ps(vTemp, g_XMMask3);
- vTemp = _mm_hadd_ps(vTemp,vTemp);
- return _mm_hadd_ps(vTemp,vTemp);
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product
- XMVECTOR vDot = _mm_mul_ps(V1,V2);
- // x=Dot.vector4_f32[1], y=Dot.vector4_f32[2]
- XMVECTOR vTemp = XM_PERMUTE_PS(vDot,_MM_SHUFFLE(2,1,2,1));
- // Result.vector4_f32[0] = x+y
- vDot = _mm_add_ss(vDot,vTemp);
- // x=Dot.vector4_f32[2]
- vTemp = XM_PERMUTE_PS(vTemp,_MM_SHUFFLE(1,1,1,1));
- // Result.vector4_f32[0] = (x+y)+z
- vDot = _mm_add_ss(vDot,vTemp);
- // Splat x
- return XM_PERMUTE_PS(vDot,_MM_SHUFFLE(0,0,0,0));
- #endif
- }
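- // Illustrative usage sketch (not part of the original header): the 3D dot
- // product is replicated into all four lanes, so the scalar can be read from
- // any component, e.g.:
- //     XMVECTOR a = XMVectorSet(1.0f, 2.0f, 3.0f, 0.0f);
- //     XMVECTOR b = XMVectorSet(4.0f, 5.0f, 6.0f, 0.0f);
- //     float dot = XMVectorGetX(XMVector3Dot(a, b));   // 1*4 + 2*5 + 3*6 = 32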
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Cross
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- // [ V1.y*V2.z - V1.z*V2.y, V1.z*V2.x - V1.x*V2.z, V1.x*V2.y - V1.y*V2.x ]
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 vResult = { { {
- (V1.vector4_f32[1] * V2.vector4_f32[2]) - (V1.vector4_f32[2] * V2.vector4_f32[1]),
- (V1.vector4_f32[2] * V2.vector4_f32[0]) - (V1.vector4_f32[0] * V2.vector4_f32[2]),
- (V1.vector4_f32[0] * V2.vector4_f32[1]) - (V1.vector4_f32[1] * V2.vector4_f32[0]),
- 0.0f
- } } };
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t v1xy = vget_low_f32(V1);
- float32x2_t v2xy = vget_low_f32(V2);
- float32x2_t v1yx = vrev64_f32( v1xy );
- float32x2_t v2yx = vrev64_f32( v2xy );
- float32x2_t v1zz = vdup_lane_f32( vget_high_f32(V1), 0 );
- float32x2_t v2zz = vdup_lane_f32( vget_high_f32(V2), 0 );
- XMVECTOR vResult = vmulq_f32( vcombine_f32(v1yx,v1xy), vcombine_f32(v2zz,v2yx) );
- vResult = vmlsq_f32( vResult, vcombine_f32(v1zz,v1yx), vcombine_f32(v2yx,v2xy) );
- vResult = veorq_u32( vResult, g_XMFlipY );
- return vandq_u32( vResult, g_XMMask3 );
- #elif defined(_XM_SSE_INTRINSICS_)
- // y1,z1,x1,w1
- XMVECTOR vTemp1 = XM_PERMUTE_PS(V1,_MM_SHUFFLE(3,0,2,1));
- // z2,x2,y2,w2
- XMVECTOR vTemp2 = XM_PERMUTE_PS(V2,_MM_SHUFFLE(3,1,0,2));
- // Perform the left operation
- XMVECTOR vResult = _mm_mul_ps(vTemp1,vTemp2);
- // z1,x1,y1,w1
- vTemp1 = XM_PERMUTE_PS(vTemp1,_MM_SHUFFLE(3,0,2,1));
- // y2,z2,x2,w2
- vTemp2 = XM_PERMUTE_PS(vTemp2,_MM_SHUFFLE(3,1,0,2));
- // Perform the right operation
- vTemp1 = _mm_mul_ps(vTemp1,vTemp2);
- // Subtract the right from the left, and return the answer
- vResult = _mm_sub_ps(vResult,vTemp1);
- // Set w to zero
- return _mm_and_ps(vResult,g_XMMask3);
- #endif
- }
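- // Illustrative usage sketch (not part of the original header): following the
- // formula in the comment above, the w component is forced to zero and
- // crossing the unit X and Y axes yields the unit Z axis:
- //     XMVECTOR x = XMVectorSet(1.0f, 0.0f, 0.0f, 0.0f);
- //     XMVECTOR y = XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
- //     XMVECTOR z = XMVector3Cross(x, y);   // (0, 0, 1, 0)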
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3LengthSq
- (
- FXMVECTOR V
- )
- {
- return XMVector3Dot(V, V);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3ReciprocalLengthEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector3LengthSq(V);
- Result = XMVectorReciprocalSqrtEst(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot3
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vpadd_f32( v1, v1 );
- v2 = vdup_lane_f32( v2, 0 );
- v1 = vadd_f32( v1, v2 );
- // Reciprocal sqrt (estimate)
- v2 = vrsqrte_f32( v1 );
- return vcombine_f32(v2, v2);
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x7f );
- return _mm_rsqrt_ps( vTemp );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_rsqrt_ps(vLengthSq);
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y and z
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has z and y
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,2,1,2));
- // x+z, y
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- // y,y,y,y
- vTemp = XM_PERMUTE_PS(vTemp,_MM_SHUFFLE(1,1,1,1));
- // x+z+y,??,??,??
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- // Splat the length squared
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- // Get the reciprocal
- vLengthSq = _mm_rsqrt_ps(vLengthSq);
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3ReciprocalLength
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector3LengthSq(V);
- Result = XMVectorReciprocalSqrt(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot3
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vpadd_f32( v1, v1 );
- v2 = vdup_lane_f32( v2, 0 );
- v1 = vadd_f32( v1, v2 );
- // Reciprocal sqrt
- float32x2_t S0 = vrsqrte_f32(v1);
- float32x2_t P0 = vmul_f32( v1, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( v1, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- float32x2_t Result = vmul_f32( S1, R1 );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x7f );
- XMVECTOR vLengthSq = _mm_sqrt_ps( vTemp );
- return _mm_div_ps( g_XMOne, vLengthSq );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vDot = _mm_mul_ps(V, V);
- vDot = _mm_and_ps(vDot, g_XMMask3);
- vDot = _mm_hadd_ps(vDot, vDot);
- vDot = _mm_hadd_ps(vDot, vDot);
- vDot = _mm_sqrt_ps(vDot);
- vDot = _mm_div_ps(g_XMOne,vDot);
- return vDot;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product
- XMVECTOR vDot = _mm_mul_ps(V,V);
- // x=Dot.y, y=Dot.z
- XMVECTOR vTemp = XM_PERMUTE_PS(vDot,_MM_SHUFFLE(2,1,2,1));
- // Result.x = x+y
- vDot = _mm_add_ss(vDot,vTemp);
- // x=Dot.z
- vTemp = XM_PERMUTE_PS(vTemp,_MM_SHUFFLE(1,1,1,1));
- // Result.x = (x+y)+z
- vDot = _mm_add_ss(vDot,vTemp);
- // Splat x
- vDot = XM_PERMUTE_PS(vDot,_MM_SHUFFLE(0,0,0,0));
- // Get the length
- vDot = _mm_sqrt_ps(vDot);
- // Get the reciprocal
- vDot = _mm_div_ps(g_XMOne,vDot);
- return vDot;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3LengthEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector3LengthSq(V);
- Result = XMVectorSqrtEst(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot3
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vpadd_f32( v1, v1 );
- v2 = vdup_lane_f32( v2, 0 );
- v1 = vadd_f32( v1, v2 );
- const float32x2_t zero = vdup_n_f32(0);
- uint32x2_t VEqualsZero = vceq_f32( v1, zero );
- // Sqrt (estimate)
- float32x2_t Result = vrsqrte_f32( v1 );
- Result = vmul_f32( v1, Result );
- Result = vbsl_f32( VEqualsZero, zero, Result );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x7f );
- return _mm_sqrt_ps( vTemp );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y and z
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has z and y
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,2,1,2));
- // x+z, y
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- // y,y,y,y
- vTemp = XM_PERMUTE_PS(vTemp,_MM_SHUFFLE(1,1,1,1));
- // x+z+y,??,??,??
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- // Splat the length squared
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- // Get the length
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Length
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector3LengthSq(V);
- Result = XMVectorSqrt(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot3
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vpadd_f32( v1, v1 );
- v2 = vdup_lane_f32( v2, 0 );
- v1 = vadd_f32( v1, v2 );
- const float32x2_t zero = vdup_n_f32(0);
- uint32x2_t VEqualsZero = vceq_f32( v1, zero );
- // Sqrt
- float32x2_t S0 = vrsqrte_f32( v1 );
- float32x2_t P0 = vmul_f32( v1, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( v1, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- float32x2_t Result = vmul_f32( S1, R1 );
- Result = vmul_f32( v1, Result );
- Result = vbsl_f32( VEqualsZero, zero, Result );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x7f );
- return _mm_sqrt_ps( vTemp );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y and z
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has z and y
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,2,1,2));
- // x+z, y
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- // y,y,y,y
- vTemp = XM_PERMUTE_PS(vTemp,_MM_SHUFFLE(1,1,1,1));
- // x+z+y,??,??,??
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- // Splat the length squared
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- // Get the length
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- // XMVector3NormalizeEst uses a reciprocal estimate and
- // returns QNaN on zero and infinite vectors.
- inline XMVECTOR XM_CALLCONV XMVector3NormalizeEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector3ReciprocalLength(V);
- Result = XMVectorMultiply(V, Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot3
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vpadd_f32( v1, v1 );
- v2 = vdup_lane_f32( v2, 0 );
- v1 = vadd_f32( v1, v2 );
- // Reciprocal sqrt (estimate)
- v2 = vrsqrte_f32( v1 );
- // Normalize
- return vmulq_f32( V, vcombine_f32(v2,v2) );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0x7f );
- XMVECTOR vResult = _mm_rsqrt_ps( vTemp );
- return _mm_mul_ps(vResult, V);
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vDot = _mm_mul_ps(V, V);
- vDot = _mm_and_ps(vDot, g_XMMask3);
- vDot = _mm_hadd_ps(vDot, vDot);
- vDot = _mm_hadd_ps(vDot, vDot);
- vDot = _mm_rsqrt_ps(vDot);
- vDot = _mm_mul_ps(vDot,V);
- return vDot;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product
- XMVECTOR vDot = _mm_mul_ps(V,V);
- // x=Dot.y, y=Dot.z
- XMVECTOR vTemp = XM_PERMUTE_PS(vDot,_MM_SHUFFLE(2,1,2,1));
- // Result.x = x+y
- vDot = _mm_add_ss(vDot,vTemp);
- // x=Dot.z
- vTemp = XM_PERMUTE_PS(vTemp,_MM_SHUFFLE(1,1,1,1));
- // Result.x = (x+y)+z
- vDot = _mm_add_ss(vDot,vTemp);
- // Splat x
- vDot = XM_PERMUTE_PS(vDot,_MM_SHUFFLE(0,0,0,0));
- // Get the reciprocal
- vDot = _mm_rsqrt_ps(vDot);
- // Perform the normalization
- vDot = _mm_mul_ps(vDot,V);
- return vDot;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Normalize
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float fLength;
- XMVECTOR vResult;
- vResult = XMVector3Length( V );
- fLength = vResult.vector4_f32[0];
- // Prevent divide by zero
- if (fLength > 0) {
- fLength = 1.0f/fLength;
- }
-
- vResult.vector4_f32[0] = V.vector4_f32[0]*fLength;
- vResult.vector4_f32[1] = V.vector4_f32[1]*fLength;
- vResult.vector4_f32[2] = V.vector4_f32[2]*fLength;
- vResult.vector4_f32[3] = V.vector4_f32[3]*fLength;
- return vResult;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot3
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vpadd_f32( v1, v1 );
- v2 = vdup_lane_f32( v2, 0 );
- v1 = vadd_f32( v1, v2 );
- uint32x2_t VEqualsZero = vceq_f32( v1, vdup_n_f32(0) );
- uint32x2_t VEqualsInf = vceq_f32( v1, vget_low_f32(g_XMInfinity) );
- // Reciprocal sqrt (2 iterations of Newton-Raphson)
- float32x2_t S0 = vrsqrte_f32( v1 );
- float32x2_t P0 = vmul_f32( v1, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( v1, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- v2 = vmul_f32( S1, R1 );
- // Normalize
- XMVECTOR vResult = vmulq_f32( V, vcombine_f32(v2,v2) );
- vResult = vbslq_f32( vcombine_f32(VEqualsZero,VEqualsZero), vdupq_n_f32(0), vResult );
- return vbslq_f32( vcombine_f32(VEqualsInf,VEqualsInf), g_XMQNaN, vResult );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_dp_ps( V, V, 0x7f );
- // Prepare for the division
- XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
- // Create zero with a single instruction
- XMVECTOR vZeroMask = _mm_setzero_ps();
- // Test for a divide by zero (Must be FP to detect -0.0)
- vZeroMask = _mm_cmpneq_ps(vZeroMask,vResult);
- // Failsafe on zero (or epsilon) length vectors
- // If the length is infinity, set the elements to zero
- vLengthSq = _mm_cmpneq_ps(vLengthSq,g_XMInfinity);
- // Divide to perform the normalization
- vResult = _mm_div_ps(V,vResult);
- // Any that are infinity, set to zero
- vResult = _mm_and_ps(vResult,vZeroMask);
- // Select qnan or result based on infinite length
- XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq,g_XMQNaN);
- XMVECTOR vTemp2 = _mm_and_ps(vResult,vLengthSq);
- vResult = _mm_or_ps(vTemp1,vTemp2);
- return vResult;
- #elif defined(_XM_SSE3_INTRINSICS_)
- // Perform the dot product on x,y and z only
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- // Prepare for the division
- XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
- // Create zero with a single instruction
- XMVECTOR vZeroMask = _mm_setzero_ps();
- // Test for a divide by zero (Must be FP to detect -0.0)
- vZeroMask = _mm_cmpneq_ps(vZeroMask,vResult);
- // Failsafe on zero (or epsilon) length vectors
- // If the length is infinity, set the elements to zero
- vLengthSq = _mm_cmpneq_ps(vLengthSq,g_XMInfinity);
- // Divide to perform the normalization
- vResult = _mm_div_ps(V,vResult);
- // Any that are infinity, set to zero
- vResult = _mm_and_ps(vResult,vZeroMask);
- // Select qnan or result based on infinite length
- XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq,g_XMQNaN);
- XMVECTOR vTemp2 = _mm_and_ps(vResult,vLengthSq);
- vResult = _mm_or_ps(vTemp1,vTemp2);
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y and z only
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(2,1,2,1));
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- vTemp = XM_PERMUTE_PS(vTemp,_MM_SHUFFLE(1,1,1,1));
- vLengthSq = _mm_add_ss(vLengthSq,vTemp);
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(0,0,0,0));
- // Prepare for the division
- XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
- // Create zero with a single instruction
- XMVECTOR vZeroMask = _mm_setzero_ps();
- // Test for a divide by zero (Must be FP to detect -0.0)
- vZeroMask = _mm_cmpneq_ps(vZeroMask,vResult);
- // Failsafe on zero (or epsilon) length vectors
- // If the length is infinity, set the elements to zero
- vLengthSq = _mm_cmpneq_ps(vLengthSq,g_XMInfinity);
- // Divide to perform the normalization
- vResult = _mm_div_ps(V,vResult);
- // Any that are infinity, set to zero
- vResult = _mm_and_ps(vResult,vZeroMask);
- // Select qnan or result based on infinite length
- XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq,g_XMQNaN);
- XMVECTOR vTemp2 = _mm_and_ps(vResult,vLengthSq);
- vResult = _mm_or_ps(vTemp1,vTemp2);
- return vResult;
- #endif
- }
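- // Illustrative note (not part of the original header): per the code above,
- // XMVector3Normalize maps a zero-length input to the zero vector and an
- // infinite-length input to QNaN, while XMVector3NormalizeEst trades that
- // handling and some precision for speed. A minimal sketch:
- //     XMVECTOR n = XMVector3Normalize(XMVectorSet(3.0f, 0.0f, 4.0f, 0.0f));
- //     // n is approximately (0.6, 0, 0.8, 0): the length 5 is divided out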
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3ClampLength
- (
- FXMVECTOR V,
- float LengthMin,
- float LengthMax
- )
- {
- XMVECTOR ClampMax = XMVectorReplicate(LengthMax);
- XMVECTOR ClampMin = XMVectorReplicate(LengthMin);
- return XMVector3ClampLengthV(V, ClampMin, ClampMax);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3ClampLengthV
- (
- FXMVECTOR V,
- FXMVECTOR LengthMin,
- FXMVECTOR LengthMax
- )
- {
- assert((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetZ(LengthMin) == XMVectorGetX(LengthMin)));
- assert((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetZ(LengthMax) == XMVectorGetX(LengthMax)));
- assert(XMVector3GreaterOrEqual(LengthMin, XMVectorZero()));
- assert(XMVector3GreaterOrEqual(LengthMax, XMVectorZero()));
- assert(XMVector3GreaterOrEqual(LengthMax, LengthMin));
- XMVECTOR LengthSq = XMVector3LengthSq(V);
- const XMVECTOR Zero = XMVectorZero();
- XMVECTOR RcpLength = XMVectorReciprocalSqrt(LengthSq);
- XMVECTOR InfiniteLength = XMVectorEqualInt(LengthSq, g_XMInfinity.v);
- XMVECTOR ZeroLength = XMVectorEqual(LengthSq, Zero);
- XMVECTOR Normal = XMVectorMultiply(V, RcpLength);
- XMVECTOR Length = XMVectorMultiply(LengthSq, RcpLength);
- XMVECTOR Select = XMVectorEqualInt(InfiniteLength, ZeroLength);
- Length = XMVectorSelect(LengthSq, Length, Select);
- Normal = XMVectorSelect(LengthSq, Normal, Select);
- XMVECTOR ControlMax = XMVectorGreater(Length, LengthMax);
- XMVECTOR ControlMin = XMVectorLess(Length, LengthMin);
- XMVECTOR ClampLength = XMVectorSelect(Length, LengthMax, ControlMax);
- ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin);
- XMVECTOR Result = XMVectorMultiply(Normal, ClampLength);
- // Preserve the original vector (with no precision loss) if the length falls within the given range
- XMVECTOR Control = XMVectorEqualInt(ControlMax, ControlMin);
- Result = XMVectorSelect(Result, V, Control);
- return Result;
- }
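- // Illustrative usage sketch (not part of the original header): the scalar
- // wrapper XMVector3ClampLength above replicates the bounds, so a typical
- // call clamps an over-long vector back onto the unit sphere:
- //     XMVECTOR v = XMVectorSet(10.0f, 0.0f, 0.0f, 0.0f);
- //     XMVECTOR c = XMVector3ClampLength(v, 0.0f, 1.0f);   // ~(1, 0, 0, 0)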
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Reflect
- (
- FXMVECTOR Incident,
- FXMVECTOR Normal
- )
- {
- // Result = Incident - (2 * dot(Incident, Normal)) * Normal
- XMVECTOR Result = XMVector3Dot(Incident, Normal);
- Result = XMVectorAdd(Result, Result);
- Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident);
- return Result;
- }
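- // Illustrative usage sketch (not part of the original header), using the
- // formula in the comment above with the +Y axis (g_XMIdentityR1) as the
- // unit normal:
- //     XMVECTOR i = XMVectorSet(1.0f, -1.0f, 0.0f, 0.0f);
- //     XMVECTOR r = XMVector3Reflect(i, g_XMIdentityR1);   // (1, 1, 0, 0)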
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Refract
- (
- FXMVECTOR Incident,
- FXMVECTOR Normal,
- float RefractionIndex
- )
- {
- XMVECTOR Index = XMVectorReplicate(RefractionIndex);
- return XMVector3RefractV(Incident, Normal, Index);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3RefractV
- (
- FXMVECTOR Incident,
- FXMVECTOR Normal,
- FXMVECTOR RefractionIndex
- )
- {
- // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) +
- // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal))))
- #if defined(_XM_NO_INTRINSICS_)
- const XMVECTOR Zero = XMVectorZero();
- XMVECTOR IDotN = XMVector3Dot(Incident, Normal);
- // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
- XMVECTOR R = XMVectorNegativeMultiplySubtract(IDotN, IDotN, g_XMOne.v);
- R = XMVectorMultiply(R, RefractionIndex);
- R = XMVectorNegativeMultiplySubtract(R, RefractionIndex, g_XMOne.v);
- if (XMVector4LessOrEqual(R, Zero))
- {
- // Total internal reflection
- return Zero;
- }
- else
- {
- // R = RefractionIndex * IDotN + sqrt(R)
- R = XMVectorSqrt(R);
- R = XMVectorMultiplyAdd(RefractionIndex, IDotN, R);
- // Result = RefractionIndex * Incident - Normal * R
- XMVECTOR Result = XMVectorMultiply(RefractionIndex, Incident);
- Result = XMVectorNegativeMultiplySubtract(Normal, R, Result);
- return Result;
- }
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR IDotN = XMVector3Dot(Incident,Normal);
- // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
- float32x4_t R = vmlsq_f32( g_XMOne, IDotN, IDotN);
- R = vmulq_f32(R, RefractionIndex);
- R = vmlsq_f32(g_XMOne, R, RefractionIndex );
- uint32x4_t vResult = vcleq_f32(R,g_XMZero);
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- if ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU )
- {
- // Total internal reflection
- vResult = g_XMZero;
- }
- else
- {
- // Sqrt(R)
- float32x4_t S0 = vrsqrteq_f32(R);
- float32x4_t P0 = vmulq_f32( R, S0 );
- float32x4_t R0 = vrsqrtsq_f32( P0, S0 );
- float32x4_t S1 = vmulq_f32( S0, R0 );
- float32x4_t P1 = vmulq_f32( R, S1 );
- float32x4_t R1 = vrsqrtsq_f32( P1, S1 );
- float32x4_t S2 = vmulq_f32( S1, R1 );
- R = vmulq_f32( R, S2 );
- // R = RefractionIndex * IDotN + sqrt(R)
- R = vmlaq_f32( R, RefractionIndex, IDotN );
- // Result = RefractionIndex * Incident - Normal * R
- vResult = vmulq_f32(RefractionIndex, Incident);
- vResult = vmlsq_f32( vResult, R, Normal );
- }
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) +
- // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal))))
- XMVECTOR IDotN = XMVector3Dot(Incident, Normal);
- // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
- XMVECTOR R = _mm_mul_ps(IDotN, IDotN);
- R = _mm_sub_ps(g_XMOne,R);
- R = _mm_mul_ps(R, RefractionIndex);
- R = _mm_mul_ps(R, RefractionIndex);
- R = _mm_sub_ps(g_XMOne,R);
- XMVECTOR vResult = _mm_cmple_ps(R,g_XMZero);
- if (_mm_movemask_ps(vResult)==0x0f)
- {
- // Total internal reflection
- vResult = g_XMZero;
- }
- else
- {
- // R = RefractionIndex * IDotN + sqrt(R)
- R = _mm_sqrt_ps(R);
- vResult = _mm_mul_ps(RefractionIndex,IDotN);
- R = _mm_add_ps(R,vResult);
- // Result = RefractionIndex * Incident - Normal * R
- vResult = _mm_mul_ps(RefractionIndex, Incident);
- R = _mm_mul_ps(R,Normal);
- vResult = _mm_sub_ps(vResult,R);
- }
- return vResult;
- #endif
- }
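- // Illustrative usage sketch (not part of the original header): the scalar
- // form XMVector3Refract forwards here; a ray entering a denser medium bends
- // toward the normal, while a shallow ray leaving it can take the total
- // internal reflection branch above and come back as zero:
- //     XMVECTOR i = XMVector3Normalize(XMVectorSet(1.0f, -1.0f, 0.0f, 0.0f));
- //     XMVECTOR t = XMVector3Refract(i, g_XMIdentityR1, 1.0f / 1.5f); // air -> glass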
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Orthogonal
- (
- FXMVECTOR V
- )
- {
- XMVECTOR Zero = XMVectorZero();
- XMVECTOR Z = XMVectorSplatZ(V);
- XMVECTOR YZYY = XMVectorSwizzle<XM_SWIZZLE_Y, XM_SWIZZLE_Z, XM_SWIZZLE_Y, XM_SWIZZLE_Y>(V);
- XMVECTOR NegativeV = XMVectorSubtract(Zero, V);
- XMVECTOR ZIsNegative = XMVectorLess(Z, Zero);
- XMVECTOR YZYYIsNegative = XMVectorLess(YZYY, Zero);
- XMVECTOR S = XMVectorAdd(YZYY, Z);
- XMVECTOR D = XMVectorSubtract(YZYY, Z);
- XMVECTOR Select = XMVectorEqualInt(ZIsNegative, YZYYIsNegative);
- XMVECTOR R0 = XMVectorPermute<XM_PERMUTE_1X, XM_PERMUTE_0X, XM_PERMUTE_0X, XM_PERMUTE_0X>(NegativeV, S);
- XMVECTOR R1 = XMVectorPermute<XM_PERMUTE_1X, XM_PERMUTE_0X, XM_PERMUTE_0X, XM_PERMUTE_0X>(V, D);
- return XMVectorSelect(R1, R0, Select);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormalsEst
- (
- FXMVECTOR N1,
- FXMVECTOR N2
- )
- {
- XMVECTOR Result = XMVector3Dot(N1, N2);
- Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
- Result = XMVectorACosEst(Result);
- return Result;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormals
- (
- FXMVECTOR N1,
- FXMVECTOR N2
- )
- {
- XMVECTOR Result = XMVector3Dot(N1, N2);
- Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
- Result = XMVectorACos(Result);
- return Result;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3AngleBetweenVectors
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- XMVECTOR L1 = XMVector3ReciprocalLength(V1);
- XMVECTOR L2 = XMVector3ReciprocalLength(V2);
- XMVECTOR Dot = XMVector3Dot(V1, V2);
- L1 = XMVectorMultiply(L1, L2);
- XMVECTOR CosAngle = XMVectorMultiply(Dot, L1);
- CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v);
- return XMVectorACos(CosAngle);
- }
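- // Illustrative usage sketch (not part of the original header): the angle is
- // returned in radians, replicated across the lanes, with the cosine clamped
- // to [-1, 1] to guard against round-off:
- //     XMVECTOR a = XMVectorSet(1.0f, 0.0f, 0.0f, 0.0f);
- //     XMVECTOR b = XMVectorSet(0.0f, 2.0f, 0.0f, 0.0f);
- //     float angle = XMVectorGetX(XMVector3AngleBetweenVectors(a, b)); // ~XM_PIDIV2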
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3LinePointDistance
- (
- FXMVECTOR LinePoint1,
- FXMVECTOR LinePoint2,
- FXMVECTOR Point
- )
- {
- // Given a vector PointVector from LinePoint1 to Point and a vector
- // LineVector from LinePoint1 to LinePoint2, the scaled distance
- // PointProjectionScale from LinePoint1 to the perpendicular projection
- // of PointVector onto the line is defined as:
- //
- // PointProjectionScale = dot(PointVector, LineVector) / LengthSq(LineVector)
- //
- // The returned value is the length of PointVector minus its projection
- // (PointProjectionScale * LineVector) onto the line.
- XMVECTOR PointVector = XMVectorSubtract(Point, LinePoint1);
- XMVECTOR LineVector = XMVectorSubtract(LinePoint2, LinePoint1);
- XMVECTOR LengthSq = XMVector3LengthSq(LineVector);
- XMVECTOR PointProjectionScale = XMVector3Dot(PointVector, LineVector);
- PointProjectionScale = XMVectorDivide(PointProjectionScale, LengthSq);
- XMVECTOR DistanceVector = XMVectorMultiply(LineVector, PointProjectionScale);
- DistanceVector = XMVectorSubtract(PointVector, DistanceVector);
- return XMVector3Length(DistanceVector);
- }
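- // Illustrative usage sketch (not part of the original header): distance from
- // a point to the infinite line through the two line points, e.g. the point
- // (0, 1, 0) is one unit from the X axis:
- //     XMVECTOR d = XMVector3LinePointDistance(
- //         XMVectorZero(), XMVectorSet(1.0f, 0.0f, 0.0f, 0.0f),
- //         XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f));   // ~1 in every lane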
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline void XM_CALLCONV XMVector3ComponentsFromNormal
- (
- XMVECTOR* pParallel,
- XMVECTOR* pPerpendicular,
- FXMVECTOR V,
- FXMVECTOR Normal
- )
- {
- assert(pParallel != nullptr);
- assert(pPerpendicular != nullptr);
- XMVECTOR Scale = XMVector3Dot(V, Normal);
- XMVECTOR Parallel = XMVectorMultiply(Normal, Scale);
- *pParallel = Parallel;
- *pPerpendicular = XMVectorSubtract(V, Parallel);
- }
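- // Illustrative usage sketch (not part of the original header): splits V into
- // the component along a unit Normal plus the perpendicular remainder, so
- // V == *pParallel + *pPerpendicular:
- //     XMVECTOR par, perp;
- //     XMVector3ComponentsFromNormal(&par, &perp,
- //         XMVectorSet(1.0f, 2.0f, 0.0f, 0.0f), g_XMIdentityR1);
- //     // par is ~(0, 2, 0, 0) and perp is ~(1, 0, 0, 0)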
- //------------------------------------------------------------------------------
- // Transform a vector using a rotation expressed as a unit quaternion
- inline XMVECTOR XM_CALLCONV XMVector3Rotate
- (
- FXMVECTOR V,
- FXMVECTOR RotationQuaternion
- )
- {
- XMVECTOR A = XMVectorSelect(g_XMSelect1110.v, V, g_XMSelect1110.v);
- XMVECTOR Q = XMQuaternionConjugate(RotationQuaternion);
- XMVECTOR Result = XMQuaternionMultiply(Q, A);
- return XMQuaternionMultiply(Result, RotationQuaternion);
- }
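- // Illustrative usage sketch (not part of the original header): rotating the
- // +X axis a quarter turn about +Z should land (approximately) on the +Y
- // axis, matching the equivalent XMMatrixRotationAxis transform:
- //     XMVECTOR q = XMQuaternionRotationAxis(g_XMIdentityR2, XM_PIDIV2);
- //     XMVECTOR v = XMVector3Rotate(g_XMIdentityR0, q);   // ~(0, 1, 0, 0)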
- //------------------------------------------------------------------------------
- // Transform a vector using the inverse of a rotation expressed as a unit quaternion
- inline XMVECTOR XM_CALLCONV XMVector3InverseRotate
- (
- FXMVECTOR V,
- FXMVECTOR RotationQuaternion
- )
- {
- XMVECTOR A = XMVectorSelect(g_XMSelect1110.v, V, g_XMSelect1110.v);
- XMVECTOR Result = XMQuaternionMultiply(RotationQuaternion, A);
- XMVECTOR Q = XMQuaternionConjugate(RotationQuaternion);
- return XMQuaternionMultiply(Result, Q);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Transform
- (
- FXMVECTOR V,
- FXMMATRIX M
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Z = XMVectorSplatZ(V);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiplyAdd(Z, M.r[2], M.r[3]);
- Result = XMVectorMultiplyAdd(Y, M.r[1], Result);
- Result = XMVectorMultiplyAdd(X, M.r[0], Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32( V );
- XMVECTOR vResult = vmlaq_lane_f32( M.r[3], M.r[0], VL, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, M.r[1], VL, 1 ); // Y
- return vmlaq_lane_f32( vResult, M.r[2], vget_high_f32( V ), 0 ); // Z
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- vResult = _mm_mul_ps(vResult,M.r[0]);
- XMVECTOR vTemp = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- vTemp = _mm_mul_ps(vTemp,M.r[1]);
- vResult = _mm_add_ps(vResult,vTemp);
- vTemp = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- vTemp = _mm_mul_ps(vTemp,M.r[2]);
- vResult = _mm_add_ps(vResult,vTemp);
- vResult = _mm_add_ps(vResult,M.r[3]);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- #ifdef _PREFAST_
- #pragma prefast(push)
- #pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" )
- #endif
- _Use_decl_annotations_
- inline XMFLOAT4* XM_CALLCONV XMVector3TransformStream
- (
- XMFLOAT4* pOutputStream,
- size_t OutputStride,
- const XMFLOAT3* pInputStream,
- size_t InputStride,
- size_t VectorCount,
- FXMMATRIX M
- )
- {
- assert(pOutputStream != nullptr);
- assert(pInputStream != nullptr);
- assert(InputStride >= sizeof(XMFLOAT3));
- _Analysis_assume_(InputStride >= sizeof(XMFLOAT3));
- assert(OutputStride >= sizeof(XMFLOAT4));
- _Analysis_assume_(OutputStride >= sizeof(XMFLOAT4));
- #if defined(_XM_NO_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- const XMVECTOR row3 = M.r[3];
- for (size_t i = 0; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat3((const XMFLOAT3*)pInputVector);
- XMVECTOR Z = XMVectorSplatZ(V);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiplyAdd(Z, row2, row3);
- Result = XMVectorMultiplyAdd(Y, row1, Result);
- Result = XMVectorMultiplyAdd(X, row0, Result);
- XMStoreFloat4((XMFLOAT4*)pOutputVector, Result);
- pInputVector += InputStride;
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- const XMVECTOR row3 = M.r[3];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT4)))
- {
- for (size_t j = 0; j < four; ++j)
- {
- float32x4x3_t V = vld3q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT3)*4;
- float32x2_t r3 = vget_low_f32( row3 );
- float32x2_t r = vget_low_f32( row0 );
- XMVECTOR vResult0 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), V.val[0], r, 0 ); // Ax+M
- XMVECTOR vResult1 = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Bx+N
- __prefetch( pInputVector );
- r3 = vget_high_f32( row3 );
- r = vget_high_f32( row0 );
- XMVECTOR vResult2 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), V.val[0], r, 0 ); // Cx+O
- XMVECTOR vResult3 = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Dx+P
-
- __prefetch( pInputVector+XM_CACHE_LINE_SIZE );
- r = vget_low_f32( row1 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[1], r, 0 ); // Ax+Ey+M
- vResult1 = vmlaq_lane_f32( vResult1, V.val[1], r, 1 ); // Bx+Fy+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*2) );
- r = vget_high_f32( row1 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[1], r, 0 ); // Cx+Gy+O
- vResult3 = vmlaq_lane_f32( vResult3, V.val[1], r, 1 ); // Dx+Hy+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*3) );
- r = vget_low_f32( row2 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[2], r, 0 ); // Ax+Ey+Iz+M
- vResult1 = vmlaq_lane_f32( vResult1, V.val[2], r, 1 ); // Bx+Fy+Jz+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*4) );
- r = vget_high_f32( row2 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[2], r, 0 ); // Cx+Gy+Kz+O
- vResult3 = vmlaq_lane_f32( vResult3, V.val[2], r, 1 ); // Dx+Hy+Lz+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*5) );
- float32x4x4_t R;
- R.val[0] = vResult0;
- R.val[1] = vResult1;
- R.val[2] = vResult2;
- R.val[3] = vResult3;
- vst4q_f32( reinterpret_cast<float*>(pOutputVector), R );
- pOutputVector += sizeof(XMFLOAT4)*4;
- i += 4;
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- float32x2_t VL = vld1_f32( reinterpret_cast<const float*>(pInputVector) );
- float32x2_t zero = vdup_n_f32(0);
- float32x2_t VH = vld1_lane_f32( reinterpret_cast<const float*>(pInputVector)+2, zero, 0 );
- pInputVector += InputStride;
- XMVECTOR vResult = vmlaq_lane_f32( row3, row0, VL, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, row1, VL, 1); // Y
- vResult = vmlaq_lane_f32( vResult, row2, VH, 0 ); // Z
- vst1q_f32( reinterpret_cast<float*>(pOutputVector), vResult );
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_SSE_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- const XMVECTOR row3 = M.r[3];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if (InputStride == sizeof(XMFLOAT3))
- {
- if ( !((uintptr_t)pOutputStream & 0xF) && !(OutputStride & 0xF) )
- {
- // Packed input, aligned output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- i += 4;
- }
- }
- else
- {
- // Packed input, unaligned output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- i += 4;
- }
- }
- }
- }
- if ( !((uintptr_t)pOutputStream & 0xF) && !(OutputStride & 0xF) )
- {
- // Aligned output
- for (; i < VectorCount; ++i)
- {
- XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
- pInputVector += InputStride;
- XMVECTOR Z = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- }
- }
- else
- {
- // Unaligned output
- for (; i < VectorCount; ++i)
- {
- XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
- pInputVector += InputStride;
- XMVECTOR Z = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTemp );
- pOutputVector += OutputStride;
- }
- }
- XM_SFENCE();
- return pOutputStream;
- #endif
- }
- #ifdef _PREFAST_
- #pragma prefast(pop)
- #endif
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3TransformCoord
- (
- FXMVECTOR V,
- FXMMATRIX M
- )
- {
- XMVECTOR Z = XMVectorSplatZ(V);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiplyAdd(Z, M.r[2], M.r[3]);
- Result = XMVectorMultiplyAdd(Y, M.r[1], Result);
- Result = XMVectorMultiplyAdd(X, M.r[0], Result);
- XMVECTOR W = XMVectorSplatW(Result);
- return XMVectorDivide( Result, W );
- }
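- // Illustrative usage sketch (not part of the original header): unlike
- // XMVector3Transform above, the result is divided by w, which is the form
- // needed when transforming points through a projection, e.g.:
- //     XMMATRIX m = XMMatrixTranslation(1.0f, 2.0f, 3.0f);
- //     XMVECTOR p = XMVector3TransformCoord(XMVectorZero(), m);   // ~(1, 2, 3, 1)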
- //------------------------------------------------------------------------------
- #ifdef _PREFAST_
- #pragma prefast(push)
- #pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" )
- #endif
- _Use_decl_annotations_
- inline XMFLOAT3* XM_CALLCONV XMVector3TransformCoordStream
- (
- XMFLOAT3* pOutputStream,
- size_t OutputStride,
- const XMFLOAT3* pInputStream,
- size_t InputStride,
- size_t VectorCount,
- FXMMATRIX M
- )
- {
- assert(pOutputStream != nullptr);
- assert(pInputStream != nullptr);
- assert(InputStride >= sizeof(XMFLOAT3));
- _Analysis_assume_(InputStride >= sizeof(XMFLOAT3));
- assert(OutputStride >= sizeof(XMFLOAT3));
- _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3));
- #if defined(_XM_NO_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- const XMVECTOR row3 = M.r[3];
- for (size_t i = 0; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat3((const XMFLOAT3*)pInputVector);
- XMVECTOR Z = XMVectorSplatZ(V);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiplyAdd(Z, row2, row3);
- Result = XMVectorMultiplyAdd(Y, row1, Result);
- Result = XMVectorMultiplyAdd(X, row0, Result);
- XMVECTOR W = XMVectorSplatW(Result);
- Result = XMVectorDivide(Result, W);
- XMStoreFloat3((XMFLOAT3*)pOutputVector, Result);
- pInputVector += InputStride;
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- const XMVECTOR row3 = M.r[3];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3)))
- {
- for (size_t j = 0; j < four; ++j)
- {
- float32x4x3_t V = vld3q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT3)*4;
- float32x2_t r3 = vget_low_f32( row3 );
- float32x2_t r = vget_low_f32( row0 );
- XMVECTOR vResult0 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), V.val[0], r, 0 ); // Ax+M
- XMVECTOR vResult1 = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Bx+N
- __prefetch( pInputVector );
- r3 = vget_high_f32( row3 );
- r = vget_high_f32( row0 );
- XMVECTOR vResult2 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), V.val[0], r, 0 ); // Cx+O
- XMVECTOR W = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Dx+P
-
- __prefetch( pInputVector+XM_CACHE_LINE_SIZE );
- r = vget_low_f32( row1 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[1], r, 0 ); // Ax+Ey+M
- vResult1 = vmlaq_lane_f32( vResult1, V.val[1], r, 1 ); // Bx+Fy+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*2) );
- r = vget_high_f32( row1 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[1], r, 0 ); // Cx+Gy+O
- W = vmlaq_lane_f32( W, V.val[1], r, 1 ); // Dx+Hy+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*3) );
- r = vget_low_f32( row2 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[2], r, 0 ); // Ax+Ey+Iz+M
- vResult1 = vmlaq_lane_f32( vResult1, V.val[2], r, 1 ); // Bx+Fy+Jz+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*4) );
- r = vget_high_f32( row2 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[2], r, 0 ); // Cx+Gy+Kz+O
- W = vmlaq_lane_f32( W, V.val[2], r, 1 ); // Dx+Hy+Lz+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*5) );
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- V.val[0] = vdivq_f32( vResult0, W );
- V.val[1] = vdivq_f32( vResult1, W );
- V.val[2] = vdivq_f32( vResult2, W );
- #else
- // 2 iterations of Newton-Raphson refinement of reciprocal
- float32x4_t Reciprocal = vrecpeq_f32(W);
- float32x4_t S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
-
- V.val[0] = vmulq_f32( vResult0, Reciprocal );
- V.val[1] = vmulq_f32( vResult1, Reciprocal );
- V.val[2] = vmulq_f32( vResult2, Reciprocal );
- #endif
- vst3q_f32( reinterpret_cast<float*>(pOutputVector),V );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- float32x2_t VL = vld1_f32( reinterpret_cast<const float*>(pInputVector) );
- float32x2_t zero = vdup_n_f32(0);
- float32x2_t VH = vld1_lane_f32( reinterpret_cast<const float*>(pInputVector)+2, zero, 0 );
- pInputVector += InputStride;
- XMVECTOR vResult = vmlaq_lane_f32( row3, row0, VL, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, row1, VL, 1 ); // Y
- vResult = vmlaq_lane_f32( vResult, row2, VH, 0 ); // Z
- VH = vget_high_f32(vResult);
- XMVECTOR W = vdupq_lane_f32( VH, 1 );
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- vResult = vdivq_f32( vResult, W );
- #else
- // 2 iterations of Newton-Raphson refinement of reciprocal for W
- float32x4_t Reciprocal = vrecpeq_f32( W );
- float32x4_t S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- vResult = vmulq_f32( vResult, Reciprocal );
- #endif
- VL = vget_low_f32( vResult );
- vst1_f32( reinterpret_cast<float*>(pOutputVector), VL );
- vst1q_lane_f32( reinterpret_cast<float*>(pOutputVector)+2, vResult, 2 );
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_SSE_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- const XMVECTOR row3 = M.r[3];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if (InputStride == sizeof(XMFLOAT3))
- {
- if (OutputStride == sizeof(XMFLOAT3))
- {
- if ( !((uintptr_t)pOutputStream & 0xF) )
- {
- // Packed input, aligned & packed output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V1 = _mm_div_ps( vTemp, W );
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V2 = _mm_div_ps( vTemp, W );
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V3 = _mm_div_ps( vTemp, W );
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V4 = _mm_div_ps( vTemp, W );
- // Pack and store the vectors
- XM3PACK4INTO3(vTemp);
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), V1 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector+16), vTemp );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector+32), V3 );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- else
- {
- // Packed input, unaligned & packed output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V1 = _mm_div_ps( vTemp, W );
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V2 = _mm_div_ps( vTemp, W );
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V3 = _mm_div_ps( vTemp, W );
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V4 = _mm_div_ps( vTemp, W );
- // Pack and store the vectors
- XM3PACK4INTO3(vTemp);
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), V1 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector+16), vTemp );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector+32), V3 );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- }
- else
- {
- // Packed input, unpacked output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- i += 4;
- }
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
- pInputVector += InputStride;
- XMVECTOR Z = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, row3 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- }
- XM_SFENCE();
- return pOutputStream;
- #endif
- }
- #ifdef _PREFAST_
- #pragma prefast(pop)
- #endif
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3TransformNormal
- (
- FXMVECTOR V,
- FXMMATRIX M
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Z = XMVectorSplatZ(V);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiply(Z, M.r[2]);
- Result = XMVectorMultiplyAdd(Y, M.r[1], Result);
- Result = XMVectorMultiplyAdd(X, M.r[0], Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32( V );
- XMVECTOR vResult = vmulq_lane_f32( M.r[0], VL, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, M.r[1], VL, 1 ); // Y
- return vmlaq_lane_f32( vResult, M.r[2], vget_high_f32( V ), 0 ); // Z
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- vResult = _mm_mul_ps(vResult,M.r[0]);
- XMVECTOR vTemp = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- vTemp = _mm_mul_ps(vTemp,M.r[1]);
- vResult = _mm_add_ps(vResult,vTemp);
- vTemp = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- vTemp = _mm_mul_ps(vTemp,M.r[2]);
- vResult = _mm_add_ps(vResult,vTemp);
- return vResult;
- #endif
- }
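- // Note: XMVector3TransformNormal uses only rows 0-2 of M (w is treated as 0),
- // so the translation row never contributes. For matrices containing
- // non-uniform scale, a common pattern is to transform by the inverse-transpose
- // and renormalize (sketch, not library code; 'world' is a caller variable):
- //
- //     XMMATRIX invWorld = XMMatrixInverse(nullptr, world);
- //     XMVECTOR n = XMVector3TransformNormal(N, XMMatrixTranspose(invWorld));
- //     n = XMVector3Normalize(n);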
- //------------------------------------------------------------------------------
- #ifdef _PREFAST_
- #pragma prefast(push)
- #pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" )
- #endif
- _Use_decl_annotations_
- inline XMFLOAT3* XM_CALLCONV XMVector3TransformNormalStream
- (
- XMFLOAT3* pOutputStream,
- size_t OutputStride,
- const XMFLOAT3* pInputStream,
- size_t InputStride,
- size_t VectorCount,
- FXMMATRIX M
- )
- {
- assert(pOutputStream != nullptr);
- assert(pInputStream != nullptr);
- assert(InputStride >= sizeof(XMFLOAT3));
- _Analysis_assume_(InputStride >= sizeof(XMFLOAT3));
- assert(OutputStride >= sizeof(XMFLOAT3));
- _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3));
- #if defined(_XM_NO_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- for (size_t i = 0; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat3((const XMFLOAT3*)pInputVector);
- XMVECTOR Z = XMVectorSplatZ(V);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiply(Z, row2);
- Result = XMVectorMultiplyAdd(Y, row1, Result);
- Result = XMVectorMultiplyAdd(X, row0, Result);
- XMStoreFloat3((XMFLOAT3*)pOutputVector, Result);
- pInputVector += InputStride;
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3)))
- {
- for (size_t j = 0; j < four; ++j)
- {
- float32x4x3_t V = vld3q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT3)*4;
- float32x2_t r = vget_low_f32( row0 );
- XMVECTOR vResult0 = vmulq_lane_f32( V.val[0], r, 0 ); // Ax
- XMVECTOR vResult1 = vmulq_lane_f32( V.val[0], r, 1 ); // Bx
- __prefetch( pInputVector );
- r = vget_high_f32( row0 );
- XMVECTOR vResult2 = vmulq_lane_f32( V.val[0], r, 0 ); // Cx
- __prefetch( pInputVector+XM_CACHE_LINE_SIZE );
- r = vget_low_f32( row1 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[1], r, 0 ); // Ax+Ey
- vResult1 = vmlaq_lane_f32( vResult1, V.val[1], r, 1 ); // Bx+Fy
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*2) );
- r = vget_high_f32( row1 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[1], r, 0 ); // Cx+Gy
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*3) );
- r = vget_low_f32( row2 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[2], r, 0 ); // Ax+Ey+Iz
- vResult1 = vmlaq_lane_f32( vResult1, V.val[2], r, 1 ); // Bx+Fy+Jz
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*4) );
- r = vget_high_f32( row2 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[2], r, 0 ); // Cx+Gy+Kz
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*5) );
- V.val[0] = vResult0;
- V.val[1] = vResult1;
- V.val[2] = vResult2;
- vst3q_f32( reinterpret_cast<float*>(pOutputVector), V );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- float32x2_t VL = vld1_f32( reinterpret_cast<const float*>(pInputVector) );
- float32x2_t zero = vdup_n_f32(0);
- float32x2_t VH = vld1_lane_f32( reinterpret_cast<const float*>(pInputVector)+2, zero, 0 );
- pInputVector += InputStride;
- XMVECTOR vResult = vmulq_lane_f32( row0, VL, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, row1, VL, 1 ); // Y
- vResult = vmlaq_lane_f32( vResult, row2, VH, 0 ); // Z
- VL = vget_low_f32( vResult );
- vst1_f32( reinterpret_cast<float*>(pOutputVector), VL );
- vst1q_lane_f32( reinterpret_cast<float*>(pOutputVector)+2, vResult, 2 );
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_SSE_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if (InputStride == sizeof(XMFLOAT3))
- {
- if (OutputStride == sizeof(XMFLOAT3))
- {
- if ( !((uintptr_t)pOutputStream & 0xF) )
- {
- // Packed input, aligned & packed output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- V1 = _mm_add_ps( vTemp, vTemp3 );
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- V2 = _mm_add_ps( vTemp, vTemp3 );
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- V3 = _mm_add_ps( vTemp, vTemp3 );
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- V4 = _mm_add_ps( vTemp, vTemp3 );
- // Pack and store the vectors
- XM3PACK4INTO3(vTemp);
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), V1 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector+16), vTemp );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector+32), V3 );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- else
- {
- // Packed input, unaligned & packed output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- V1 = _mm_add_ps( vTemp, vTemp3 );
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- V2 = _mm_add_ps( vTemp, vTemp3 );
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- V3 = _mm_add_ps( vTemp, vTemp3 );
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- V4 = _mm_add_ps( vTemp, vTemp3 );
- // Pack and store the vectors
- XM3PACK4INTO3(vTemp);
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), V1 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector+16), vTemp );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector+32), V3 );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- }
- else
- {
- // Packed input, unpacked output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, row2 );
- vTemp2 = _mm_mul_ps( Y, row1 );
- vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- i += 4;
- }
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
- pInputVector += InputStride;
- XMVECTOR Z = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, row2 );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, row1 );
- XMVECTOR vTemp3 = _mm_mul_ps( X, row0 );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- }
- XM_SFENCE();
- return pOutputStream;
- #endif
- }
- #ifdef _PREFAST_
- #pragma prefast(pop)
- #endif
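- // Usage sketch for the stream version (illustrative; the Vertex layout and the
- // counts are assumptions): the stride parameters let the input normals live
- // inside a larger vertex structure while the outputs stay tightly packed.
- //
- //     struct Vertex { XMFLOAT3 pos; XMFLOAT3 normal; XMFLOAT2 uv; };
- //     Vertex verts[128];
- //     XMFLOAT3 outNormals[128];
- //     XMVector3TransformNormalStream(outNormals, sizeof(XMFLOAT3),
- //                                    &verts[0].normal, sizeof(Vertex),
- //                                    128, worldMatrix);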
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Project
- (
- FXMVECTOR V,
- float ViewportX,
- float ViewportY,
- float ViewportWidth,
- float ViewportHeight,
- float ViewportMinZ,
- float ViewportMaxZ,
- FXMMATRIX Projection,
- CXMMATRIX View,
- CXMMATRIX World
- )
- {
- const float HalfViewportWidth = ViewportWidth * 0.5f;
- const float HalfViewportHeight = ViewportHeight * 0.5f;
- XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 0.0f);
- XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f);
- XMMATRIX Transform = XMMatrixMultiply(World, View);
- Transform = XMMatrixMultiply(Transform, Projection);
- XMVECTOR Result = XMVector3TransformCoord(V, Transform);
- Result = XMVectorMultiplyAdd(Result, Scale, Offset);
- return Result;
- }
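- // In effect XMVector3Project computes the following mapping (a sketch of the
- // math implied by Scale and Offset above, not additional code):
- //
- //     clip     = (V, 1) * World * View * Projection
- //     ndc      = clip / clip.w
- //     screen.x = ndc.x * ( ViewportWidth  * 0.5f) + ViewportX + ViewportWidth  * 0.5f
- //     screen.y = ndc.y * (-ViewportHeight * 0.5f) + ViewportY + ViewportHeight * 0.5f
- //     screen.z = ndc.z * (ViewportMaxZ - ViewportMinZ) + ViewportMinZ
- //
- // The Y scale is negated so +Y in projection space maps to decreasing pixel rows.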
- //------------------------------------------------------------------------------
- #ifdef _PREFAST_
- #pragma prefast(push)
- #pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" )
- #endif
- _Use_decl_annotations_
- inline XMFLOAT3* XM_CALLCONV XMVector3ProjectStream
- (
- XMFLOAT3* pOutputStream,
- size_t OutputStride,
- const XMFLOAT3* pInputStream,
- size_t InputStride,
- size_t VectorCount,
- float ViewportX,
- float ViewportY,
- float ViewportWidth,
- float ViewportHeight,
- float ViewportMinZ,
- float ViewportMaxZ,
- FXMMATRIX Projection,
- CXMMATRIX View,
- CXMMATRIX World
- )
- {
- assert(pOutputStream != nullptr);
- assert(pInputStream != nullptr);
- assert(InputStride >= sizeof(XMFLOAT3));
- _Analysis_assume_(InputStride >= sizeof(XMFLOAT3));
- assert(OutputStride >= sizeof(XMFLOAT3));
- _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3));
- #if defined(_XM_NO_INTRINSICS_)
- const float HalfViewportWidth = ViewportWidth * 0.5f;
- const float HalfViewportHeight = ViewportHeight * 0.5f;
- XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 1.0f);
- XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f);
- XMMATRIX Transform = XMMatrixMultiply(World, View);
- Transform = XMMatrixMultiply(Transform, Projection);
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- for (size_t i = 0; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat3((const XMFLOAT3*)pInputVector);
- XMVECTOR Result = XMVector3TransformCoord(V, Transform);
- Result = XMVectorMultiplyAdd(Result, Scale, Offset);
- XMStoreFloat3((XMFLOAT3*)pOutputVector, Result);
- pInputVector += InputStride;
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- const float HalfViewportWidth = ViewportWidth * 0.5f;
- const float HalfViewportHeight = ViewportHeight * 0.5f;
- XMMATRIX Transform = XMMatrixMultiply(World, View);
- Transform = XMMatrixMultiply(Transform, Projection);
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3)))
- {
- XMVECTOR ScaleX = vdupq_n_f32(HalfViewportWidth);
- XMVECTOR ScaleY = vdupq_n_f32(-HalfViewportHeight);
- XMVECTOR ScaleZ = vdupq_n_f32(ViewportMaxZ - ViewportMinZ);
- XMVECTOR OffsetX = vdupq_n_f32(ViewportX + HalfViewportWidth);
- XMVECTOR OffsetY = vdupq_n_f32(ViewportY + HalfViewportHeight);
- XMVECTOR OffsetZ = vdupq_n_f32(ViewportMinZ);
- for (size_t j = 0; j < four; ++j)
- {
- float32x4x3_t V = vld3q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT3)*4;
- float32x2_t r3 = vget_low_f32( Transform.r[3] );
- float32x2_t r = vget_low_f32( Transform.r[0] );
- XMVECTOR vResult0 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), V.val[0], r, 0 ); // Ax+M
- XMVECTOR vResult1 = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Bx+N
- __prefetch( pInputVector );
- r3 = vget_high_f32( Transform.r[3] );
- r = vget_high_f32( Transform.r[0] );
- XMVECTOR vResult2 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), V.val[0], r, 0 ); // Cx+O
- XMVECTOR W = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), V.val[0], r, 1 ); // Dx+P
- __prefetch( pInputVector+XM_CACHE_LINE_SIZE );
- r = vget_low_f32( Transform.r[1] );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[1], r, 0 ); // Ax+Ey+M
- vResult1 = vmlaq_lane_f32( vResult1, V.val[1], r, 1 ); // Bx+Fy+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*2) );
- r = vget_high_f32( Transform.r[1] );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[1], r, 0 ); // Cx+Gy+O
- W = vmlaq_lane_f32( W, V.val[1], r, 1 ); // Dx+Hy+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*3) );
- r = vget_low_f32( Transform.r[2] );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[2], r, 0 ); // Ax+Ey+Iz+M
- vResult1 = vmlaq_lane_f32( vResult1, V.val[2], r, 1 ); // Bx+Fy+Jz+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*4) );
- r = vget_high_f32( Transform.r[2] );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[2], r, 0 ); // Cx+Gy+Kz+O
- W = vmlaq_lane_f32( W, V.val[2], r, 1 ); // Dx+Hy+Lz+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*5) );
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- vResult0 = vdivq_f32( vResult0, W );
- vResult1 = vdivq_f32( vResult1, W );
- vResult2 = vdivq_f32( vResult2, W );
- #else
- // 2 iterations of Newton-Raphson refinement of reciprocal
- float32x4_t Reciprocal = vrecpeq_f32(W);
- float32x4_t S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- vResult0 = vmulq_f32( vResult0, Reciprocal );
- vResult1 = vmulq_f32( vResult1, Reciprocal );
- vResult2 = vmulq_f32( vResult2, Reciprocal );
- #endif
- V.val[0] = vmlaq_f32( OffsetX, vResult0, ScaleX );
- V.val[1] = vmlaq_f32( OffsetY, vResult1, ScaleY );
- V.val[2] = vmlaq_f32( OffsetZ, vResult2, ScaleZ );
- vst3q_f32( reinterpret_cast<float*>(pOutputVector),V );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- }
- if ( i < VectorCount)
- {
- XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 1.0f);
- XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f);
- for (; i < VectorCount; i++)
- {
- float32x2_t VL = vld1_f32( reinterpret_cast<const float*>(pInputVector) );
- float32x2_t zero = vdup_n_f32(0);
- float32x2_t VH = vld1_lane_f32( reinterpret_cast<const float*>(pInputVector)+2, zero, 0 );
- pInputVector += InputStride;
- XMVECTOR vResult = vmlaq_lane_f32( Transform.r[3], Transform.r[0], VL, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, Transform.r[1], VL, 1 ); // Y
- vResult = vmlaq_lane_f32( vResult, Transform.r[2], VH, 0 ); // Z
- VH = vget_high_f32(vResult);
- XMVECTOR W = vdupq_lane_f32( VH, 1 );
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- vResult = vdivq_f32( vResult, W );
- #else
- // 2 iterations of Newton-Raphson refinement of reciprocal for W
- float32x4_t Reciprocal = vrecpeq_f32( W );
- float32x4_t S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- vResult = vmulq_f32( vResult, Reciprocal );
- #endif
- vResult = vmlaq_f32( Offset, vResult, Scale );
- VL = vget_low_f32( vResult );
- vst1_f32( reinterpret_cast<float*>(pOutputVector), VL );
- vst1q_lane_f32( reinterpret_cast<float*>(pOutputVector)+2, vResult, 2 );
- pOutputVector += OutputStride;
- }
- }
- return pOutputStream;
- #elif defined(_XM_SSE_INTRINSICS_)
- const float HalfViewportWidth = ViewportWidth * 0.5f;
- const float HalfViewportHeight = ViewportHeight * 0.5f;
- XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 1.0f);
- XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f);
- XMMATRIX Transform = XMMatrixMultiply(World, View);
- Transform = XMMatrixMultiply(Transform, Projection);
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if (InputStride == sizeof(XMFLOAT3))
- {
- if (OutputStride == sizeof(XMFLOAT3))
- {
- if ( !((uintptr_t)pOutputStream & 0xF) )
- {
- // Packed input, aligned & packed output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, Transform.r[2] );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- XMVECTOR vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- V1 = _mm_add_ps( vTemp, Offset );
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- V2 = _mm_add_ps( vTemp, Offset );
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- V3 = _mm_add_ps( vTemp, Offset );
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- V4 = _mm_add_ps( vTemp, Offset );
- // Pack and store the vectors
- XM3PACK4INTO3(vTemp);
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), V1 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector+16), vTemp );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector+32), V3 );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- else
- {
- // Packed input, unaligned & packed output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, Transform.r[2] );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- XMVECTOR vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- V1 = _mm_add_ps( vTemp, Offset );
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- V2 = _mm_add_ps( vTemp, Offset );
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- V3 = _mm_add_ps( vTemp, Offset );
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- V4 = _mm_add_ps( vTemp, Offset );
- // Pack and store the vectors
- XM3PACK4INTO3(vTemp);
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), V1 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector+16), vTemp );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector+32), V3 );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- }
- else
- {
- // Packed input, unpacked output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, Transform.r[2] );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- XMVECTOR vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- vTemp = _mm_add_ps( vTemp, Offset );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 2
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- vTemp = _mm_add_ps( vTemp, Offset );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 3
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- vTemp = _mm_add_ps( vTemp, Offset );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 4
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- vTemp = _mm_add_ps( vTemp, Offset );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- i += 4;
- }
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
- pInputVector += InputStride;
- XMVECTOR Z = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, Transform.r[2] );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- XMVECTOR vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- vTemp = _mm_mul_ps( vTemp, Scale );
- vTemp = _mm_add_ps( vTemp, Offset );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- }
- XM_SFENCE();
- return pOutputStream;
- #endif
- }
- #ifdef _PREFAST_
- #pragma prefast(pop)
- #endif
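- // Usage sketch (illustrative; the viewport values and buffers are assumptions):
- // projecting a batch of world-space points into a 1280x720 viewport with its
- // origin at the top-left corner and a [0, 1] depth range.
- //
- //     XMFLOAT3 worldPts[64], screenPts[64];
- //     XMVector3ProjectStream(screenPts, sizeof(XMFLOAT3),
- //                            worldPts, sizeof(XMFLOAT3), 64,
- //                            0.0f, 0.0f, 1280.0f, 720.0f, 0.0f, 1.0f,
- //                            projection, view, XMMatrixIdentity());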
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector3Unproject
- (
- FXMVECTOR V,
- float ViewportX,
- float ViewportY,
- float ViewportWidth,
- float ViewportHeight,
- float ViewportMinZ,
- float ViewportMaxZ,
- FXMMATRIX Projection,
- CXMMATRIX View,
- CXMMATRIX World
- )
- {
- static const XMVECTORF32 D = { { { -1.0f, 1.0f, 0.0f, 0.0f } } };
- XMVECTOR Scale = XMVectorSet(ViewportWidth * 0.5f, -ViewportHeight * 0.5f, ViewportMaxZ - ViewportMinZ, 1.0f);
- Scale = XMVectorReciprocal(Scale);
- XMVECTOR Offset = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0f);
- Offset = XMVectorMultiplyAdd(Scale, Offset, D.v);
- XMMATRIX Transform = XMMatrixMultiply(World, View);
- Transform = XMMatrixMultiply(Transform, Projection);
- Transform = XMMatrixInverse(nullptr, Transform);
- XMVECTOR Result = XMVectorMultiplyAdd(V, Scale, Offset);
- return XMVector3TransformCoord(Result, Transform);
- }
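- // A common use of XMVector3Unproject is building a picking ray from a screen
- // position (sketch; 'mouseX'/'mouseY', the viewport values, and the matrices
- // are assumptions): unproject the point at the near plane (z = 0) and the far
- // plane (z = 1), then normalize the difference to get the ray direction.
- //
- //     XMVECTOR nearPt = XMVector3Unproject(XMVectorSet(mouseX, mouseY, 0.0f, 0.0f),
- //                           0.0f, 0.0f, 1280.0f, 720.0f, 0.0f, 1.0f, proj, view, world);
- //     XMVECTOR farPt  = XMVector3Unproject(XMVectorSet(mouseX, mouseY, 1.0f, 0.0f),
- //                           0.0f, 0.0f, 1280.0f, 720.0f, 0.0f, 1.0f, proj, view, world);
- //     XMVECTOR rayDir = XMVector3Normalize(XMVectorSubtract(farPt, nearPt));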
- //------------------------------------------------------------------------------
- #ifdef _PREFAST_
- #pragma prefast(push)
- #pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" )
- #endif
- _Use_decl_annotations_
- inline XMFLOAT3* XM_CALLCONV XMVector3UnprojectStream
- (
- XMFLOAT3* pOutputStream,
- size_t OutputStride,
- const XMFLOAT3* pInputStream,
- size_t InputStride,
- size_t VectorCount,
- float ViewportX,
- float ViewportY,
- float ViewportWidth,
- float ViewportHeight,
- float ViewportMinZ,
- float ViewportMaxZ,
- FXMMATRIX Projection,
- CXMMATRIX View,
- CXMMATRIX World)
- {
- assert(pOutputStream != nullptr);
- assert(pInputStream != nullptr);
- assert(InputStride >= sizeof(XMFLOAT3));
- _Analysis_assume_(InputStride >= sizeof(XMFLOAT3));
- assert(OutputStride >= sizeof(XMFLOAT3));
- _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3));
- #if defined(_XM_NO_INTRINSICS_)
- static const XMVECTORF32 D = { { { -1.0f, 1.0f, 0.0f, 0.0f } } };
- XMVECTOR Scale = XMVectorSet(ViewportWidth * 0.5f, -ViewportHeight * 0.5f, ViewportMaxZ - ViewportMinZ, 1.0f);
- Scale = XMVectorReciprocal(Scale);
- XMVECTOR Offset = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0f);
- Offset = XMVectorMultiplyAdd(Scale, Offset, D.v);
- XMMATRIX Transform = XMMatrixMultiply(World, View);
- Transform = XMMatrixMultiply(Transform, Projection);
- Transform = XMMatrixInverse(nullptr, Transform);
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- for (size_t i = 0; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat3((const XMFLOAT3*)pInputVector);
- XMVECTOR Result = XMVectorMultiplyAdd(V, Scale, Offset);
- Result = XMVector3TransformCoord(Result, Transform);
- XMStoreFloat3((XMFLOAT3*)pOutputVector, Result);
- pInputVector += InputStride;
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMMATRIX Transform = XMMatrixMultiply(World, View);
- Transform = XMMatrixMultiply(Transform, Projection);
- Transform = XMMatrixInverse(nullptr, Transform);
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- float sx = 1.f / (ViewportWidth * 0.5f);
- float sy = 1.f / (-ViewportHeight * 0.5f);
- float sz = 1.f / (ViewportMaxZ - ViewportMinZ);
- float ox = (-ViewportX * sx) - 1.f;
- float oy = (-ViewportY * sy) + 1.f;
- float oz = (-ViewportMinZ * sz);
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3)))
- {
- for (size_t j = 0; j < four; ++j)
- {
- float32x4x3_t V = vld3q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT3)*4;
- XMVECTOR ScaleX = vdupq_n_f32(sx);
- XMVECTOR OffsetX = vdupq_n_f32(ox);
- XMVECTOR VX = vmlaq_f32( OffsetX, ScaleX, V.val[0] );
- float32x2_t r3 = vget_low_f32( Transform.r[3] );
- float32x2_t r = vget_low_f32( Transform.r[0] );
- XMVECTOR vResult0 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), VX, r, 0 ); // Ax+M
- XMVECTOR vResult1 = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), VX, r, 1 ); // Bx+N
- __prefetch( pInputVector );
- r3 = vget_high_f32( Transform.r[3] );
- r = vget_high_f32( Transform.r[0] );
- XMVECTOR vResult2 = vmlaq_lane_f32( vdupq_lane_f32( r3, 0 ), VX, r, 0 ); // Cx+O
- XMVECTOR W = vmlaq_lane_f32( vdupq_lane_f32( r3, 1 ), VX, r, 1 ); // Dx+P
- __prefetch( pInputVector+XM_CACHE_LINE_SIZE );
- XMVECTOR ScaleY = vdupq_n_f32(sy);
- XMVECTOR OffsetY = vdupq_n_f32(oy);
- XMVECTOR VY = vmlaq_f32( OffsetY, ScaleY, V.val[1] );
- r = vget_low_f32( Transform.r[1] );
- vResult0 = vmlaq_lane_f32( vResult0, VY, r, 0 ); // Ax+Ey+M
- vResult1 = vmlaq_lane_f32( vResult1, VY, r, 1 ); // Bx+Fy+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*2) );
- r = vget_high_f32( Transform.r[1] );
- vResult2 = vmlaq_lane_f32( vResult2, VY, r, 0 ); // Cx+Gy+O
- W = vmlaq_lane_f32( W, VY, r, 1 ); // Dx+Hy+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*3) );
- XMVECTOR ScaleZ = vdupq_n_f32(sz);
- XMVECTOR OffsetZ = vdupq_n_f32(oz);
- XMVECTOR VZ = vmlaq_f32( OffsetZ, ScaleZ, V.val[2] );
- r = vget_low_f32( Transform.r[2] );
- vResult0 = vmlaq_lane_f32( vResult0, VZ, r, 0 ); // Ax+Ey+Iz+M
- vResult1 = vmlaq_lane_f32( vResult1, VZ, r, 1 ); // Bx+Fy+Jz+N
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*4) );
- r = vget_high_f32( Transform.r[2] );
- vResult2 = vmlaq_lane_f32( vResult2, VZ, r, 0 ); // Cx+Gy+Kz+O
- W = vmlaq_lane_f32( W, VZ, r, 1 ); // Dx+Hy+Lz+P
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*5) );
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- V.val[0] = vdivq_f32( vResult0, W );
- V.val[1] = vdivq_f32( vResult1, W );
- V.val[2] = vdivq_f32( vResult2, W );
- #else
- // 2 iterations of Newton-Raphson refinement of reciprocal
- float32x4_t Reciprocal = vrecpeq_f32(W);
- float32x4_t S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
-
- V.val[0] = vmulq_f32( vResult0, Reciprocal );
- V.val[1] = vmulq_f32( vResult1, Reciprocal );
- V.val[2] = vmulq_f32( vResult2, Reciprocal );
- #endif
- vst3q_f32( reinterpret_cast<float*>(pOutputVector),V );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- }
- if (i < VectorCount)
- {
- float32x2_t ScaleL = vcreate_f32(((uint64_t)*(const uint32_t *)&sx) | ((uint64_t)(*(const uint32_t *)&sy) << 32));
- float32x2_t ScaleH = vcreate_f32((uint64_t)*(const uint32_t *)&sz);
- float32x2_t OffsetL = vcreate_f32(((uint64_t)*(const uint32_t *)&ox) | ((uint64_t)(*(const uint32_t *)&oy) << 32));
- float32x2_t OffsetH = vcreate_f32((uint64_t)*(const uint32_t *)&oz);
- for (; i < VectorCount; i++)
- {
- float32x2_t VL = vld1_f32( reinterpret_cast<const float*>(pInputVector) );
- float32x2_t zero = vdup_n_f32(0);
- float32x2_t VH = vld1_lane_f32( reinterpret_cast<const float*>(pInputVector)+2, zero, 0 );
- pInputVector += InputStride;
- VL = vmla_f32( OffsetL, VL, ScaleL );
- VH = vmla_f32( OffsetH, VH, ScaleH );
- XMVECTOR vResult = vmlaq_lane_f32( Transform.r[3], Transform.r[0], VL, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, Transform.r[1], VL, 1 ); // Y
- vResult = vmlaq_lane_f32( vResult, Transform.r[2], VH, 0 ); // Z
- VH = vget_high_f32(vResult);
- XMVECTOR W = vdupq_lane_f32( VH, 1 );
- #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
- vResult = vdivq_f32( vResult, W );
- #else
- // 2 iterations of Newton-Raphson refinement of reciprocal for W
- float32x4_t Reciprocal = vrecpeq_f32( W );
- float32x4_t S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- S = vrecpsq_f32( Reciprocal, W );
- Reciprocal = vmulq_f32( S, Reciprocal );
- vResult = vmulq_f32( vResult, Reciprocal );
- #endif
- VL = vget_low_f32( vResult );
- vst1_f32( reinterpret_cast<float*>(pOutputVector), VL );
- vst1q_lane_f32( reinterpret_cast<float*>(pOutputVector)+2, vResult, 2 );
- pOutputVector += OutputStride;
- }
- }
- return pOutputStream;
- #elif defined(_XM_SSE_INTRINSICS_)
- static const XMVECTORF32 D = { { { -1.0f, 1.0f, 0.0f, 0.0f } } };
- XMVECTOR Scale = XMVectorSet(ViewportWidth * 0.5f, -ViewportHeight * 0.5f, ViewportMaxZ - ViewportMinZ, 1.0f);
- Scale = XMVectorReciprocal(Scale);
- XMVECTOR Offset = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0f);
- Offset = _mm_mul_ps(Scale, Offset);
- Offset = _mm_add_ps(Offset, D);
- XMMATRIX Transform = XMMatrixMultiply(World, View);
- Transform = XMMatrixMultiply(Transform, Projection);
- Transform = XMMatrixInverse(nullptr, Transform);
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if (InputStride == sizeof(XMFLOAT3))
- {
- if (OutputStride == sizeof(XMFLOAT3))
- {
- if ( !((uintptr_t)pOutputStream & 0xF) )
- {
- // Packed input, aligned & packed output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- V1 = _mm_mul_ps( V1, Scale );
- V1 = _mm_add_ps( V1, Offset );
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, Transform.r[2] );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- XMVECTOR vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V1 = _mm_div_ps( vTemp, W );
- // Result 2
- V2 = _mm_mul_ps( V2, Scale );
- V2 = _mm_add_ps( V2, Offset );
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V2 = _mm_div_ps( vTemp, W );
- // Result 3
- V3 = _mm_mul_ps( V3, Scale );
- V3 = _mm_add_ps( V3, Offset );
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V3 = _mm_div_ps( vTemp, W );
- // Result 4
- V4 = _mm_mul_ps( V4, Scale );
- V4 = _mm_add_ps( V4, Offset );
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V4 = _mm_div_ps( vTemp, W );
- // Pack and store the vectors
- XM3PACK4INTO3(vTemp);
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), V1 );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector+16), vTemp );
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector+32), V3 );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- else
- {
- // Packed input, unaligned & packed output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- V1 = _mm_mul_ps( V1, Scale );
- V1 = _mm_add_ps( V1, Offset );
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, Transform.r[2] );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- XMVECTOR vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V1 = _mm_div_ps( vTemp, W );
- // Result 2
- V2 = _mm_mul_ps( V2, Scale );
- V2 = _mm_add_ps( V2, Offset );
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V2 = _mm_div_ps( vTemp, W );
- // Result 3
- V3 = _mm_mul_ps( V3, Scale );
- V3 = _mm_add_ps( V3, Offset );
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V3 = _mm_div_ps( vTemp, W );
- // Result 4
- V4 = _mm_mul_ps( V4, Scale );
- V4 = _mm_add_ps( V4, Offset );
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- V4 = _mm_div_ps( vTemp, W );
- // Pack and store the vectors
- XM3PACK4INTO3(vTemp);
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), V1 );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector+16), vTemp );
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector+32), V3 );
- pOutputVector += sizeof(XMFLOAT3)*4;
- i += 4;
- }
- }
- }
- else
- {
- // Packed input, unpacked output
- for (size_t j = 0; j < four; ++j)
- {
- __m128 V1 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- __m128 L2 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+16) );
- __m128 L3 = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector+32) );
- pInputVector += sizeof(XMFLOAT3)*4;
- // Unpack the 4 vectors (.w components are junk)
- XM3UNPACK3INTO4(V1,L2,L3);
- // Result 1
- V1 = _mm_mul_ps( V1, Scale );
- V1 = _mm_add_ps( V1, Offset );
- XMVECTOR Z = XM_PERMUTE_PS( V1, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V1, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V1, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, Transform.r[2] );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- XMVECTOR vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 2
- V2 = _mm_mul_ps( V2, Scale );
- V2 = _mm_add_ps( V2, Offset );
- Z = XM_PERMUTE_PS( V2, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V2, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V2, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 3
- V3 = _mm_mul_ps( V3, Scale );
- V3 = _mm_add_ps( V3, Offset );
- Z = XM_PERMUTE_PS( V3, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V3, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V3, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- // Result 4
- V4 = _mm_mul_ps( V4, Scale );
- V4 = _mm_add_ps( V4, Offset );
- Z = XM_PERMUTE_PS( V4, _MM_SHUFFLE(2, 2, 2, 2) );
- Y = XM_PERMUTE_PS( V4, _MM_SHUFFLE(1, 1, 1, 1) );
- X = XM_PERMUTE_PS( V4, _MM_SHUFFLE(0, 0, 0, 0) );
- vTemp = _mm_mul_ps( Z, Transform.r[2] );
- vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- i += 4;
- }
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat3(reinterpret_cast<const XMFLOAT3*>(pInputVector));
- pInputVector += InputStride;
- V = _mm_mul_ps( V, Scale );
- V = _mm_add_ps( V, Offset );
- XMVECTOR Z = XM_PERMUTE_PS( V, _MM_SHUFFLE(2, 2, 2, 2) );
- XMVECTOR Y = XM_PERMUTE_PS( V, _MM_SHUFFLE(1, 1, 1, 1) );
- XMVECTOR X = XM_PERMUTE_PS( V, _MM_SHUFFLE(0, 0, 0, 0) );
- XMVECTOR vTemp = _mm_mul_ps( Z, Transform.r[2] );
- XMVECTOR vTemp2 = _mm_mul_ps( Y, Transform.r[1] );
- XMVECTOR vTemp3 = _mm_mul_ps( X, Transform.r[0] );
- vTemp = _mm_add_ps( vTemp, Transform.r[3] );
- vTemp = _mm_add_ps( vTemp, vTemp2 );
- vTemp = _mm_add_ps( vTemp, vTemp3 );
- XMVECTOR W = XM_PERMUTE_PS( vTemp, _MM_SHUFFLE(3, 3, 3, 3) );
- vTemp = _mm_div_ps( vTemp, W );
- XMStoreFloat3(reinterpret_cast<XMFLOAT3*>(pOutputVector), vTemp);
- pOutputVector += OutputStride;
- }
- XM_SFENCE();
- return pOutputStream;
- #endif
- }
- #ifdef _PREFAST_
- #pragma prefast(pop)
- #endif
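- // The stream variant above applies the same screen-to-world mapping to
- // VectorCount points at a time. As with the other *Stream routines, tightly
- // packed XMFLOAT3 strides select the four-at-a-time SIMD paths, while other
- // strides are handled by the per-vector tail loop.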
- /****************************************************************************
- *
- * 4D Vector
- *
- ****************************************************************************/
- //------------------------------------------------------------------------------
- // Comparison operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4Equal
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1]) && (V1.vector4_f32[2] == V2.vector4_f32[2]) && (V1.vector4_f32[3] == V2.vector4_f32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpeq_ps(V1,V2);
- return ((_mm_movemask_ps(vTemp)==0x0f) != 0);
- #else
- return XMComparisonAllTrue(XMVector4EqualR(V1, V2));
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector4EqualR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_f32[0] == V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] == V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] == V2.vector4_f32[2]) &&
- (V1.vector4_f32[3] == V2.vector4_f32[3]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] != V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] != V2.vector4_f32[2]) &&
- (V1.vector4_f32[3] != V2.vector4_f32[3]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1);
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpeq_ps(V1,V2);
- int iTest = _mm_movemask_ps(vTemp);
- uint32_t CR = 0;
- if (iTest==0xf) // All equal?
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (iTest==0) // All not equal?
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
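- // The "R" variants return a comparison record rather than a bool; the record
- // is intended to be consumed with the XMComparisonAllTrue / XMComparisonAnyTrue /
- // XMComparisonAllFalse / XMComparisonAnyFalse helpers, e.g. (sketch):
- //
- //     uint32_t cr = XMVector4EqualR(v1, v2);
- //     if (XMComparisonAllTrue(cr))  { /* every component equal */ }
- //     if (XMComparisonAnyFalse(cr)) { /* at least one component differs */ }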
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4EqualInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1]) && (V1.vector4_u32[2] == V2.vector4_u32[2]) && (V1.vector4_u32[3] == V2.vector4_u32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_u32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1),_mm_castps_si128(V2));
- return ((_mm_movemask_ps(_mm_castsi128_ps(vTemp))==0xf) != 0);
- #else
- return XMComparisonAllTrue(XMVector4EqualIntR(V1, V2));
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector4EqualIntR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if (V1.vector4_u32[0] == V2.vector4_u32[0] &&
- V1.vector4_u32[1] == V2.vector4_u32[1] &&
- V1.vector4_u32[2] == V2.vector4_u32[2] &&
- V1.vector4_u32[3] == V2.vector4_u32[3])
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (V1.vector4_u32[0] != V2.vector4_u32[0] &&
- V1.vector4_u32[1] != V2.vector4_u32[1] &&
- V1.vector4_u32[2] != V2.vector4_u32[2] &&
- V1.vector4_u32[3] != V2.vector4_u32[3])
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_u32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1);
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1),_mm_castps_si128(V2));
- int iTest = _mm_movemask_ps(_mm_castsi128_ps(vTemp));
- uint32_t CR = 0;
- if (iTest==0xf) // All equal?
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (iTest==0) // All not equal?
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4NearEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- FXMVECTOR Epsilon
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float dx, dy, dz, dw;
- dx = fabsf(V1.vector4_f32[0]-V2.vector4_f32[0]);
- dy = fabsf(V1.vector4_f32[1]-V2.vector4_f32[1]);
- dz = fabsf(V1.vector4_f32[2]-V2.vector4_f32[2]);
- dw = fabsf(V1.vector4_f32[3]-V2.vector4_f32[3]);
- return (((dx <= Epsilon.vector4_f32[0]) &&
- (dy <= Epsilon.vector4_f32[1]) &&
- (dz <= Epsilon.vector4_f32[2]) &&
- (dw <= Epsilon.vector4_f32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x4_t vDelta = vsubq_f32( V1, V2 );
- uint32x4_t vResult = vacleq_f32( vDelta, Epsilon );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Get the difference
- XMVECTOR vDelta = _mm_sub_ps(V1,V2);
- // Get the absolute value of the difference
- XMVECTOR vTemp = _mm_setzero_ps();
- vTemp = _mm_sub_ps(vTemp,vDelta);
- vTemp = _mm_max_ps(vTemp,vDelta);
- vTemp = _mm_cmple_ps(vTemp,Epsilon);
- return ((_mm_movemask_ps(vTemp)==0xf) != 0);
- #endif
- }
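- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: a tolerance-based
- // comparison with a replicated epsilon, as typically used in unit tests. The Example* name
- // and the 1e-4f tolerance are arbitrary choices for the illustration.
- inline bool XM_CALLCONV ExampleVector4NearEqualUsage(FXMVECTOR Expected, FXMVECTOR Actual)
- {
-     // Treat the vectors as equal if every component differs by at most 1e-4.
-     const XMVECTOR Epsilon = XMVectorReplicate(1e-4f);
-     return XMVector4NearEqual(Expected, Actual, Epsilon);
- }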
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4NotEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1]) || (V1.vector4_f32[2] != V2.vector4_f32[2]) || (V1.vector4_f32[3] != V2.vector4_f32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) != 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpneq_ps(V1,V2);
- return ((_mm_movemask_ps(vTemp)) != 0);
- #else
- return XMComparisonAnyFalse(XMVector4EqualR(V1, V2));
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4NotEqualInt
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1]) || (V1.vector4_u32[2] != V2.vector4_u32[2]) || (V1.vector4_u32[3] != V2.vector4_u32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vceqq_u32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) != 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1),_mm_castps_si128(V2));
- return ((_mm_movemask_ps(_mm_castsi128_ps(vTemp))!=0xF) != 0);
- #else
- return XMComparisonAnyFalse(XMVector4EqualIntR(V1, V2));
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4Greater
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1]) && (V1.vector4_f32[2] > V2.vector4_f32[2]) && (V1.vector4_f32[3] > V2.vector4_f32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgtq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpgt_ps(V1,V2);
- return ((_mm_movemask_ps(vTemp)==0x0f) != 0);
- #else
- return XMComparisonAllTrue(XMVector4GreaterR(V1, V2));
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector4GreaterR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if (V1.vector4_f32[0] > V2.vector4_f32[0] &&
- V1.vector4_f32[1] > V2.vector4_f32[1] &&
- V1.vector4_f32[2] > V2.vector4_f32[2] &&
- V1.vector4_f32[3] > V2.vector4_f32[3])
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (V1.vector4_f32[0] <= V2.vector4_f32[0] &&
- V1.vector4_f32[1] <= V2.vector4_f32[1] &&
- V1.vector4_f32[2] <= V2.vector4_f32[2] &&
- V1.vector4_f32[3] <= V2.vector4_f32[3])
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgtq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1);
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- uint32_t CR = 0;
- XMVECTOR vTemp = _mm_cmpgt_ps(V1,V2);
- int iTest = _mm_movemask_ps(vTemp);
- if (iTest==0xf) {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4GreaterOrEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1]) && (V1.vector4_f32[2] >= V2.vector4_f32[2]) && (V1.vector4_f32[3] >= V2.vector4_f32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgeq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmpge_ps(V1,V2);
- return ((_mm_movemask_ps(vTemp)==0x0f) != 0);
- #else
- return XMComparisonAllTrue(XMVector4GreaterOrEqualR(V1, V2));
- #endif
- }
- //------------------------------------------------------------------------------
- inline uint32_t XM_CALLCONV XMVector4GreaterOrEqualR
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- uint32_t CR = 0;
- if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] >= V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] >= V2.vector4_f32[2]) &&
- (V1.vector4_f32[3] >= V2.vector4_f32[3]))
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) &&
- (V1.vector4_f32[1] < V2.vector4_f32[1]) &&
- (V1.vector4_f32[2] < V2.vector4_f32[2]) &&
- (V1.vector4_f32[3] < V2.vector4_f32[3]))
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcgeq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- uint32_t r = vget_lane_u32(vTemp.val[1], 1);
- uint32_t CR = 0;
- if ( r == 0xFFFFFFFFU )
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if ( !r )
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #elif defined(_XM_SSE_INTRINSICS_)
- uint32_t CR = 0;
- XMVECTOR vTemp = _mm_cmpge_ps(V1,V2);
- int iTest = _mm_movemask_ps(vTemp);
- if (iTest==0x0f)
- {
- CR = XM_CRMASK_CR6TRUE;
- }
- else if (!iTest)
- {
- CR = XM_CRMASK_CR6FALSE;
- }
- return CR;
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4Less
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1]) && (V1.vector4_f32[2] < V2.vector4_f32[2]) && (V1.vector4_f32[3] < V2.vector4_f32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcltq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmplt_ps(V1,V2);
- return ((_mm_movemask_ps(vTemp)==0x0f) != 0);
- #else
- return XMComparisonAllTrue(XMVector4GreaterR(V2, V1));
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4LessOrEqual
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1]) && (V1.vector4_f32[2] <= V2.vector4_f32[2]) && (V1.vector4_f32[3] <= V2.vector4_f32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- uint32x4_t vResult = vcleq_f32( V1, V2 );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp = _mm_cmple_ps(V1,V2);
- return ((_mm_movemask_ps(vTemp)==0x0f) != 0);
- #else
- return XMComparisonAllTrue(XMVector4GreaterOrEqualR(V2, V1));
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4InBounds
- (
- FXMVECTOR V,
- FXMVECTOR Bounds
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) &&
- (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) &&
- (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) &&
- (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3])) != 0);
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Test if less than or equal
- uint32x4_t ivTemp1 = vcleq_f32(V,Bounds);
- // Negate the bounds
- float32x4_t vTemp2 = vnegq_f32(Bounds);
- // Test if greater or equal (Reversed)
- uint32x4_t ivTemp2 = vcleq_f32(vTemp2,V);
- // Blend answers
- ivTemp1 = vandq_u32(ivTemp1,ivTemp2);
- // in bounds?
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(ivTemp1), vget_high_u8(ivTemp1));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Test if less than or equal
- XMVECTOR vTemp1 = _mm_cmple_ps(V,Bounds);
- // Negate the bounds
- XMVECTOR vTemp2 = _mm_mul_ps(Bounds,g_XMNegativeOne);
- // Test if greater or equal (Reversed)
- vTemp2 = _mm_cmple_ps(vTemp2,V);
- // Blend answers
- vTemp1 = _mm_and_ps(vTemp1,vTemp2);
- // All in bounds?
- return ((_mm_movemask_ps(vTemp1)==0x0f) != 0);
- #else
- return XMComparisonAllInBounds(XMVector4InBoundsR(V, Bounds));
- #endif
- }
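- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: XMVector4InBounds
- // tests a symmetric box, so a replicated bound of 1 checks that every component of V lies
- // in [-1, +1]. The Example* name is hypothetical.
- inline bool XM_CALLCONV ExampleInsideUnitBox(FXMVECTOR V)
- {
-     const XMVECTOR Bounds = XMVectorReplicate(1.0f);
-     return XMVector4InBounds(V, Bounds);
- }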
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4IsNaN
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (XMISNAN(V.vector4_f32[0]) ||
- XMISNAN(V.vector4_f32[1]) ||
- XMISNAN(V.vector4_f32[2]) ||
- XMISNAN(V.vector4_f32[3]));
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Test against itself. NaN is always not equal
- uint32x4_t vTempNan = vceqq_f32( V, V );
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempNan), vget_high_u8(vTempNan));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- // If any are NaN, the mask is zero
- return ( vget_lane_u32(vTemp.val[1], 1) != 0xFFFFFFFFU );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Test against itself. NaN is always not equal
- XMVECTOR vTempNan = _mm_cmpneq_ps(V,V);
- // If any are NaN, the mask is non-zero
- return (_mm_movemask_ps(vTempNan)!=0);
- #endif
- }
- //------------------------------------------------------------------------------
- inline bool XM_CALLCONV XMVector4IsInfinite
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- return (XMISINF(V.vector4_f32[0]) ||
- XMISINF(V.vector4_f32[1]) ||
- XMISINF(V.vector4_f32[2]) ||
- XMISINF(V.vector4_f32[3]));
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Mask off the sign bit
- uint32x4_t vTempInf = vandq_u32( V, g_XMAbsMask );
- // Compare to infinity
- vTempInf = vceqq_f32(vTempInf, g_XMInfinity );
- // If any are infinity, the signs are true.
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempInf), vget_high_u8(vTempInf));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- return ( vget_lane_u32(vTemp.val[1], 1) != 0 );
- #elif defined(_XM_SSE_INTRINSICS_)
- // Mask off the sign bit
- XMVECTOR vTemp = _mm_and_ps(V,g_XMAbsMask);
- // Compare to infinity
- vTemp = _mm_cmpeq_ps(vTemp,g_XMInfinity);
- // If any are infinity, the signs are true.
- return (_mm_movemask_ps(vTemp) != 0);
- #endif
- }
- //------------------------------------------------------------------------------
- // Computation operations
- //------------------------------------------------------------------------------
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4Dot
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result;
- Result.f[0] =
- Result.f[1] =
- Result.f[2] =
- Result.f[3] = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1] + V1.vector4_f32[2] * V2.vector4_f32[2] + V1.vector4_f32[3] * V2.vector4_f32[3];
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x4_t vTemp = vmulq_f32( V1, V2 );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vadd_f32( v1, v2 );
- v1 = vpadd_f32( v1, v1 );
- return vcombine_f32( v1, v1 );
- #elif defined(_XM_SSE4_INTRINSICS_)
- return _mm_dp_ps( V1, V2, 0xff );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vTemp = _mm_mul_ps(V1, V2);
- vTemp = _mm_hadd_ps(vTemp, vTemp);
- return _mm_hadd_ps(vTemp, vTemp);
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR vTemp2 = V2;
- XMVECTOR vTemp = _mm_mul_ps(V1,vTemp2);
- vTemp2 = _mm_shuffle_ps(vTemp2,vTemp,_MM_SHUFFLE(1,0,0,0)); // Copy X to the Z position and Y to the W position
- vTemp2 = _mm_add_ps(vTemp2,vTemp); // Add Z = X+Z; W = Y+W;
- vTemp = _mm_shuffle_ps(vTemp,vTemp2,_MM_SHUFFLE(0,3,0,0)); // Copy W to the Z position
- vTemp = _mm_add_ps(vTemp,vTemp2); // Add Z and W together
- return XM_PERMUTE_PS(vTemp,_MM_SHUFFLE(2,2,2,2)); // Splat Z and return
- #endif
- }
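- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: the 4D dot product
- // is replicated into every lane of the result, so any component accessor retrieves the
- // scalar value. The Example* name is hypothetical.
- inline float ExampleVector4DotUsage()
- {
-     XMVECTOR a = XMVectorSet(1.f, 2.f, 3.f, 4.f);
-     XMVECTOR b = XMVectorSet(5.f, 6.f, 7.f, 8.f);
-     // 1*5 + 2*6 + 3*7 + 4*8 = 70
-     return XMVectorGetX(XMVector4Dot(a, b));
- }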
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4Cross
- (
- FXMVECTOR V1,
- FXMVECTOR V2,
- FXMVECTOR V3
- )
- {
- // [ ((v2.z*v3.w-v2.w*v3.z)*v1.y)-((v2.y*v3.w-v2.w*v3.y)*v1.z)+((v2.y*v3.z-v2.z*v3.y)*v1.w),
- // ((v2.w*v3.z-v2.z*v3.w)*v1.x)-((v2.w*v3.x-v2.x*v3.w)*v1.z)+((v2.z*v3.x-v2.x*v3.z)*v1.w),
- // ((v2.y*v3.w-v2.w*v3.y)*v1.x)-((v2.x*v3.w-v2.w*v3.x)*v1.y)+((v2.x*v3.y-v2.y*v3.x)*v1.w),
- // ((v2.z*v3.y-v2.y*v3.z)*v1.x)-((v2.z*v3.x-v2.x*v3.z)*v1.y)+((v2.y*v3.x-v2.x*v3.y)*v1.z) ]
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- (((V2.vector4_f32[2] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[2]))*V1.vector4_f32[1]) - (((V2.vector4_f32[1] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[1]))*V1.vector4_f32[2]) + (((V2.vector4_f32[1] * V3.vector4_f32[2]) - (V2.vector4_f32[2] * V3.vector4_f32[1]))*V1.vector4_f32[3]),
- (((V2.vector4_f32[3] * V3.vector4_f32[2]) - (V2.vector4_f32[2] * V3.vector4_f32[3]))*V1.vector4_f32[0]) - (((V2.vector4_f32[3] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[3]))*V1.vector4_f32[2]) + (((V2.vector4_f32[2] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[2]))*V1.vector4_f32[3]),
- (((V2.vector4_f32[1] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[1]))*V1.vector4_f32[0]) - (((V2.vector4_f32[0] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[0]))*V1.vector4_f32[1]) + (((V2.vector4_f32[0] * V3.vector4_f32[1]) - (V2.vector4_f32[1] * V3.vector4_f32[0]))*V1.vector4_f32[3]),
- (((V2.vector4_f32[2] * V3.vector4_f32[1]) - (V2.vector4_f32[1] * V3.vector4_f32[2]))*V1.vector4_f32[0]) - (((V2.vector4_f32[2] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[2]))*V1.vector4_f32[1]) + (((V2.vector4_f32[1] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[1]))*V1.vector4_f32[2]),
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- const float32x2_t select = vget_low_f32( g_XMMaskX );
- // Term1: V2zwyz * V3wzwy
- const float32x2_t v2xy = vget_low_f32(V2);
- const float32x2_t v2zw = vget_high_f32(V2);
- const float32x2_t v2yx = vrev64_f32(v2xy);
- const float32x2_t v2wz = vrev64_f32(v2zw);
- const float32x2_t v2yz = vbsl_f32( select, v2yx, v2wz );
- const float32x2_t v3zw = vget_high_f32(V3);
- const float32x2_t v3wz = vrev64_f32(v3zw);
- const float32x2_t v3xy = vget_low_f32(V3);
- const float32x2_t v3wy = vbsl_f32( select, v3wz, v3xy );
- float32x4_t vTemp1 = vcombine_f32(v2zw,v2yz);
- float32x4_t vTemp2 = vcombine_f32(v3wz,v3wy);
- XMVECTOR vResult = vmulq_f32( vTemp1, vTemp2 );
- // - V2wzwy * V3zwyz
- const float32x2_t v2wy = vbsl_f32( select, v2wz, v2xy );
- const float32x2_t v3yx = vrev64_f32(v3xy);
- const float32x2_t v3yz = vbsl_f32( select, v3yx, v3wz );
- vTemp1 = vcombine_f32(v2wz,v2wy);
- vTemp2 = vcombine_f32(v3zw,v3yz);
- vResult = vmlsq_f32( vResult, vTemp1, vTemp2 );
- // term1 * V1yxxx
- const float32x2_t v1xy = vget_low_f32(V1);
- const float32x2_t v1yx = vrev64_f32(v1xy);
- vTemp1 = vcombine_f32( v1yx, vdup_lane_f32( v1yx, 1 ) );
- vResult = vmulq_f32( vResult, vTemp1 );
- // Term2: V2ywxz * V3wxwx
- const float32x2_t v2yw = vrev64_f32(v2wy);
- const float32x2_t v2xz = vbsl_f32( select, v2xy, v2wz );
- const float32x2_t v3wx = vbsl_f32( select, v3wz, v3yx );
- vTemp1 = vcombine_f32(v2yw,v2xz);
- vTemp2 = vcombine_f32(v3wx,v3wx);
- float32x4_t vTerm = vmulq_f32( vTemp1, vTemp2 );
- // - V2wxwx * V3ywxz
- const float32x2_t v2wx = vbsl_f32( select, v2wz, v2yx );
- const float32x2_t v3yw = vrev64_f32(v3wy);
- const float32x2_t v3xz = vbsl_f32( select, v3xy, v3wz );
- vTemp1 = vcombine_f32(v2wx,v2wx);
- vTemp2 = vcombine_f32(v3yw,v3xz);
- vTerm = vmlsq_f32( vTerm, vTemp1, vTemp2 );
- // vResult - term2 * V1zzyy
- const float32x2_t v1zw = vget_high_f32(V1);
- vTemp1 = vcombine_f32( vdup_lane_f32(v1zw, 0), vdup_lane_f32(v1yx, 0) );
- vResult = vmlsq_f32( vResult, vTerm, vTemp1 );
- // Term3: V2yzxy * V3zxyx
- const float32x2_t v3zx = vrev64_f32(v3xz);
- vTemp1 = vcombine_f32(v2yz,v2xy);
- vTemp2 = vcombine_f32(v3zx,v3yx);
- vTerm = vmulq_f32( vTemp1, vTemp2 );
- // - V2zxyx * V3yzxy
- const float32x2_t v2zx = vrev64_f32(v2xz);
- vTemp1 = vcombine_f32(v2zx,v2yx);
- vTemp2 = vcombine_f32(v3yz,v3xy);
- vTerm = vmlsq_f32( vTerm, vTemp1, vTemp2 );
- // vResult + term3 * V1wwwz
- const float32x2_t v1wz = vrev64_f32(v1zw);
- vTemp1 = vcombine_f32( vdup_lane_f32( v1wz, 0 ), v1wz );
- return vmlaq_f32( vResult, vTerm, vTemp1 );
- #elif defined(_XM_SSE_INTRINSICS_)
- // V2zwyz * V3wzwy
- XMVECTOR vResult = XM_PERMUTE_PS(V2,_MM_SHUFFLE(2,1,3,2));
- XMVECTOR vTemp3 = XM_PERMUTE_PS(V3,_MM_SHUFFLE(1,3,2,3));
- vResult = _mm_mul_ps(vResult,vTemp3);
- // - V2wzwy * V3zwyz
- XMVECTOR vTemp2 = XM_PERMUTE_PS(V2,_MM_SHUFFLE(1,3,2,3));
- vTemp3 = XM_PERMUTE_PS(vTemp3,_MM_SHUFFLE(1,3,0,1));
- vTemp2 = _mm_mul_ps(vTemp2,vTemp3);
- vResult = _mm_sub_ps(vResult,vTemp2);
- // term1 * V1yxxx
- XMVECTOR vTemp1 = XM_PERMUTE_PS(V1,_MM_SHUFFLE(0,0,0,1));
- vResult = _mm_mul_ps(vResult,vTemp1);
- // V2ywxz * V3wxwx
- vTemp2 = XM_PERMUTE_PS(V2,_MM_SHUFFLE(2,0,3,1));
- vTemp3 = XM_PERMUTE_PS(V3,_MM_SHUFFLE(0,3,0,3));
- vTemp3 = _mm_mul_ps(vTemp3,vTemp2);
- // - V2wxwx * V3ywxz
- vTemp2 = XM_PERMUTE_PS(vTemp2,_MM_SHUFFLE(2,1,2,1));
- vTemp1 = XM_PERMUTE_PS(V3,_MM_SHUFFLE(2,0,3,1));
- vTemp2 = _mm_mul_ps(vTemp2,vTemp1);
- vTemp3 = _mm_sub_ps(vTemp3,vTemp2);
- // vResult - temp * V1zzyy
- vTemp1 = XM_PERMUTE_PS(V1,_MM_SHUFFLE(1,1,2,2));
- vTemp1 = _mm_mul_ps(vTemp1,vTemp3);
- vResult = _mm_sub_ps(vResult,vTemp1);
- // V2yzxy * V3zxyx
- vTemp2 = XM_PERMUTE_PS(V2,_MM_SHUFFLE(1,0,2,1));
- vTemp3 = XM_PERMUTE_PS(V3,_MM_SHUFFLE(0,1,0,2));
- vTemp3 = _mm_mul_ps(vTemp3,vTemp2);
- // - V2zxyx * V3yzxy
- vTemp2 = XM_PERMUTE_PS(vTemp2,_MM_SHUFFLE(2,0,2,1));
- vTemp1 = XM_PERMUTE_PS(V3,_MM_SHUFFLE(1,0,2,1));
- vTemp1 = _mm_mul_ps(vTemp1,vTemp2);
- vTemp3 = _mm_sub_ps(vTemp3,vTemp1);
- // vResult + term * V1wwwz
- vTemp1 = XM_PERMUTE_PS(V1,_MM_SHUFFLE(2,3,3,3));
- vTemp3 = _mm_mul_ps(vTemp3,vTemp1);
- vResult = _mm_add_ps(vResult,vTemp3);
- return vResult;
- #endif
- }
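- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: the 4D cross product
- // of three vectors is orthogonal to all three of them. For the x, y and z unit axes the
- // formula above yields (0, 0, 0, -1). The Example* name is hypothetical.
- inline void ExampleVector4CrossUsage()
- {
-     const XMVECTOR x = XMVectorSet(1.f, 0.f, 0.f, 0.f);
-     const XMVECTOR y = XMVectorSet(0.f, 1.f, 0.f, 0.f);
-     const XMVECTOR z = XMVectorSet(0.f, 0.f, 1.f, 0.f);
-     XMVECTOR w = XMVector4Cross(x, y, z);            // (0, 0, 0, -1)
-     float check = XMVectorGetX(XMVector4Dot(w, x));  // 0: orthogonal to x (and to y, z)
-     (void)w; (void)check;
- }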
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4LengthSq
- (
- FXMVECTOR V
- )
- {
- return XMVector4Dot(V, V);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4ReciprocalLengthEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector4LengthSq(V);
- Result = XMVectorReciprocalSqrtEst(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot4
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vadd_f32( v1, v2 );
- v1 = vpadd_f32( v1, v1 );
- // Reciprocal sqrt (estimate)
- v2 = vrsqrte_f32( v1 );
- return vcombine_f32(v2, v2);
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0xff );
- return _mm_rsqrt_ps( vTemp );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_rsqrt_ps(vLengthSq);
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y,z and w
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has z and w
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(3,2,3,2));
- // x+z, y+w
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // x+z,x+z,x+z,y+w
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,0,0,0));
- // ??,??,y+w,y+w
- vTemp = _mm_shuffle_ps(vTemp,vLengthSq,_MM_SHUFFLE(3,3,0,0));
- // ??,??,x+z+y+w,??
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // Splat the length
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(2,2,2,2));
- // Get the reciprocal
- vLengthSq = _mm_rsqrt_ps(vLengthSq);
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4ReciprocalLength
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector4LengthSq(V);
- Result = XMVectorReciprocalSqrt(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot4
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vadd_f32( v1, v2 );
- v1 = vpadd_f32( v1, v1 );
- // Reciprocal sqrt
- float32x2_t S0 = vrsqrte_f32(v1);
- float32x2_t P0 = vmul_f32( v1, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( v1, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- float32x2_t Result = vmul_f32( S1, R1 );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0xff );
- XMVECTOR vLengthSq = _mm_sqrt_ps( vTemp );
- return _mm_div_ps( g_XMOne, vLengthSq );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- vLengthSq = _mm_div_ps(g_XMOne, vLengthSq);
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y,z and w
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has z and w
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(3,2,3,2));
- // x+z, y+w
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // x+z,x+z,x+z,y+w
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,0,0,0));
- // ??,??,y+w,y+w
- vTemp = _mm_shuffle_ps(vTemp,vLengthSq,_MM_SHUFFLE(3,3,0,0));
- // ??,??,x+z+y+w,??
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // Splat the length
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(2,2,2,2));
- // Get the reciprocal
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- // Accurate!
- vLengthSq = _mm_div_ps(g_XMOne,vLengthSq);
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4LengthEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector4LengthSq(V);
- Result = XMVectorSqrtEst(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot4
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vadd_f32( v1, v2 );
- v1 = vpadd_f32( v1, v1 );
- const float32x2_t zero = vdup_n_f32(0);
- uint32x2_t VEqualsZero = vceq_f32( v1, zero );
- // Sqrt (estimate)
- float32x2_t Result = vrsqrte_f32( v1 );
- Result = vmul_f32( v1, Result );
- Result = vbsl_f32( VEqualsZero, zero, Result );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0xff );
- return _mm_sqrt_ps( vTemp );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y,z and w
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has z and w
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(3,2,3,2));
- // x+z, y+w
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // x+z,x+z,x+z,y+w
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,0,0,0));
- // ??,??,y+w,y+w
- vTemp = _mm_shuffle_ps(vTemp,vLengthSq,_MM_SHUFFLE(3,3,0,0));
- // ??,??,x+z+y+w,??
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // Splat the length
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(2,2,2,2));
- // Get the length
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- return vLengthSq;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4Length
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector4LengthSq(V);
- Result = XMVectorSqrt(Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot4
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vadd_f32( v1, v2 );
- v1 = vpadd_f32( v1, v1 );
- const float32x2_t zero = vdup_n_f32(0);
- uint32x2_t VEqualsZero = vceq_f32( v1, zero );
- // Sqrt
- float32x2_t S0 = vrsqrte_f32( v1 );
- float32x2_t P0 = vmul_f32( v1, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( v1, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- float32x2_t Result = vmul_f32( S1, R1 );
- Result = vmul_f32( v1, Result );
- Result = vbsl_f32( VEqualsZero, zero, Result );
- return vcombine_f32( Result, Result );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0xff );
- return _mm_sqrt_ps( vTemp );
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- return vLengthSq;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y,z and w
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has z and w
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(3,2,3,2));
- // x+z, y+w
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // x+z,x+z,x+z,y+w
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,0,0,0));
- // ??,??,y+w,y+w
- vTemp = _mm_shuffle_ps(vTemp,vLengthSq,_MM_SHUFFLE(3,3,0,0));
- // ??,??,x+z+y+w,??
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // Splat the length
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(2,2,2,2));
- // Get the length
- vLengthSq = _mm_sqrt_ps(vLengthSq);
- return vLengthSq;
- #endif
- }
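- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: Length, LengthEst and
- // LengthSq all splat their result across the four lanes; LengthSq avoids the square root when
- // only relative comparisons are needed. The Example* name is hypothetical.
- inline float ExampleVector4LengthUsage()
- {
-     XMVECTOR v = XMVectorSet(1.f, 2.f, 2.f, 4.f);
-     // sqrt(1 + 4 + 4 + 16) = 5
-     return XMVectorGetX(XMVector4Length(v));
- }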
- //------------------------------------------------------------------------------
- // XMVector4NormalizeEst uses a reciprocal estimate and
- // returns QNaN on zero and infinite vectors.
- inline XMVECTOR XM_CALLCONV XMVector4NormalizeEst
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR Result;
- Result = XMVector4ReciprocalLength(V);
- Result = XMVectorMultiply(V, Result);
- return Result;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot4
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vadd_f32( v1, v2 );
- v1 = vpadd_f32( v1, v1 );
- // Reciprocal sqrt (estimate)
- v2 = vrsqrte_f32( v1 );
- // Normalize
- return vmulq_f32( V, vcombine_f32(v2,v2) );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vTemp = _mm_dp_ps( V, V, 0xff );
- XMVECTOR vResult = _mm_rsqrt_ps( vTemp );
- return _mm_mul_ps(vResult, V);
- #elif defined(_XM_SSE3_INTRINSICS_)
- XMVECTOR vDot = _mm_mul_ps(V, V);
- vDot = _mm_hadd_ps(vDot, vDot);
- vDot = _mm_hadd_ps(vDot, vDot);
- vDot = _mm_rsqrt_ps(vDot);
- vDot = _mm_mul_ps(vDot, V);
- return vDot;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y,z and w
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has z and w
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(3,2,3,2));
- // x+z, y+w
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // x+z,x+z,x+z,y+w
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,0,0,0));
- // ??,??,y+w,y+w
- vTemp = _mm_shuffle_ps(vTemp,vLengthSq,_MM_SHUFFLE(3,3,0,0));
- // ??,??,x+z+y+w,??
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // Splat the length
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(2,2,2,2));
- // Get the reciprocal
- XMVECTOR vResult = _mm_rsqrt_ps(vLengthSq);
- // Reciprocal mul to perform the normalization
- vResult = _mm_mul_ps(vResult,V);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4Normalize
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float fLength;
- XMVECTOR vResult;
- vResult = XMVector4Length( V );
- fLength = vResult.vector4_f32[0];
- // Prevent divide by zero
- if (fLength > 0) {
- fLength = 1.0f/fLength;
- }
-
- vResult.vector4_f32[0] = V.vector4_f32[0]*fLength;
- vResult.vector4_f32[1] = V.vector4_f32[1]*fLength;
- vResult.vector4_f32[2] = V.vector4_f32[2]*fLength;
- vResult.vector4_f32[3] = V.vector4_f32[3]*fLength;
- return vResult;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- // Dot4
- float32x4_t vTemp = vmulq_f32( V, V );
- float32x2_t v1 = vget_low_f32( vTemp );
- float32x2_t v2 = vget_high_f32( vTemp );
- v1 = vadd_f32( v1, v2 );
- v1 = vpadd_f32( v1, v1 );
- uint32x2_t VEqualsZero = vceq_f32( v1, vdup_n_f32(0) );
- uint32x2_t VEqualsInf = vceq_f32( v1, vget_low_f32(g_XMInfinity) );
- // Reciprocal sqrt (2 iterations of Newton-Raphson)
- float32x2_t S0 = vrsqrte_f32( v1 );
- float32x2_t P0 = vmul_f32( v1, S0 );
- float32x2_t R0 = vrsqrts_f32( P0, S0 );
- float32x2_t S1 = vmul_f32( S0, R0 );
- float32x2_t P1 = vmul_f32( v1, S1 );
- float32x2_t R1 = vrsqrts_f32( P1, S1 );
- v2 = vmul_f32( S1, R1 );
- // Normalize
- XMVECTOR vResult = vmulq_f32( V, vcombine_f32(v2,v2) );
- vResult = vbslq_f32( vcombine_f32(VEqualsZero,VEqualsZero), vdupq_n_f32(0), vResult );
- return vbslq_f32( vcombine_f32(VEqualsInf,VEqualsInf), g_XMQNaN, vResult );
- #elif defined(_XM_SSE4_INTRINSICS_)
- XMVECTOR vLengthSq = _mm_dp_ps( V, V, 0xff );
- // Prepare for the division
- XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
- // Create zero with a single instruction
- XMVECTOR vZeroMask = _mm_setzero_ps();
- // Test for a divide by zero (Must be FP to detect -0.0)
- vZeroMask = _mm_cmpneq_ps(vZeroMask,vResult);
- // Failsafe on zero (Or epsilon) length planes
- // If the length is infinity, set the elements to zero
- vLengthSq = _mm_cmpneq_ps(vLengthSq,g_XMInfinity);
- // Divide to perform the normalization
- vResult = _mm_div_ps(V,vResult);
- // Any that are infinity, set to zero
- vResult = _mm_and_ps(vResult,vZeroMask);
- // Select qnan or result based on infinite length
- XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq,g_XMQNaN);
- XMVECTOR vTemp2 = _mm_and_ps(vResult,vLengthSq);
- vResult = _mm_or_ps(vTemp1,vTemp2);
- return vResult;
- #elif defined(_XM_SSE3_INTRINSICS_)
- // Perform the dot product on x,y,z and w
- XMVECTOR vLengthSq = _mm_mul_ps(V, V);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
- // Prepare for the division
- XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
- // Create zero with a single instruction
- XMVECTOR vZeroMask = _mm_setzero_ps();
- // Test for a divide by zero (Must be FP to detect -0.0)
- vZeroMask = _mm_cmpneq_ps(vZeroMask,vResult);
- // Failsafe on zero (Or epsilon) length planes
- // If the length is infinity, set the elements to zero
- vLengthSq = _mm_cmpneq_ps(vLengthSq,g_XMInfinity);
- // Divide to perform the normalization
- vResult = _mm_div_ps(V,vResult);
- // Any that are infinity, set to zero
- vResult = _mm_and_ps(vResult,vZeroMask);
- // Select qnan or result based on infinite length
- XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq,g_XMQNaN);
- XMVECTOR vTemp2 = _mm_and_ps(vResult,vLengthSq);
- vResult = _mm_or_ps(vTemp1,vTemp2);
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- // Perform the dot product on x,y,z and w
- XMVECTOR vLengthSq = _mm_mul_ps(V,V);
- // vTemp has z and w
- XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(3,2,3,2));
- // x+z, y+w
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // x+z,x+z,x+z,y+w
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(1,0,0,0));
- // ??,??,y+w,y+w
- vTemp = _mm_shuffle_ps(vTemp,vLengthSq,_MM_SHUFFLE(3,3,0,0));
- // ??,??,x+z+y+w,??
- vLengthSq = _mm_add_ps(vLengthSq,vTemp);
- // Splat the length
- vLengthSq = XM_PERMUTE_PS(vLengthSq,_MM_SHUFFLE(2,2,2,2));
- // Prepare for the division
- XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
- // Create zero with a single instruction
- XMVECTOR vZeroMask = _mm_setzero_ps();
- // Test for a divide by zero (Must be FP to detect -0.0)
- vZeroMask = _mm_cmpneq_ps(vZeroMask,vResult);
- // Failsafe on zero (Or epsilon) length planes
- // If the length is infinity, set the elements to zero
- vLengthSq = _mm_cmpneq_ps(vLengthSq,g_XMInfinity);
- // Divide to perform the normalization
- vResult = _mm_div_ps(V,vResult);
- // Any that are infinity, set to zero
- vResult = _mm_and_ps(vResult,vZeroMask);
- // Select qnan or result based on infinite length
- XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq,g_XMQNaN);
- XMVECTOR vTemp2 = _mm_and_ps(vResult,vLengthSq);
- vResult = _mm_or_ps(vTemp1,vTemp2);
- return vResult;
- #endif
- }
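- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: unlike the Est variant,
- // XMVector4Normalize returns zero for a zero vector and QNaN for an infinite one, per the
- // branches above. The Example* name is hypothetical.
- inline void ExampleVector4NormalizeUsage()
- {
-     XMVECTOR v = XMVectorSet(0.f, 3.f, 0.f, 4.f);
-     XMVECTOR n = XMVector4Normalize(v);               // (0, 0.6, 0, 0.8)
-     XMVECTOR z = XMVector4Normalize(XMVectorZero());  // all zeros, not NaN
-     (void)n; (void)z;
- }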
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4ClampLength
- (
- FXMVECTOR V,
- float LengthMin,
- float LengthMax
- )
- {
- XMVECTOR ClampMax = XMVectorReplicate(LengthMax);
- XMVECTOR ClampMin = XMVectorReplicate(LengthMin);
- return XMVector4ClampLengthV(V, ClampMin, ClampMax);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4ClampLengthV
- (
- FXMVECTOR V,
- FXMVECTOR LengthMin,
- FXMVECTOR LengthMax
- )
- {
- assert((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetZ(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetW(LengthMin) == XMVectorGetX(LengthMin)));
- assert((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetZ(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetW(LengthMax) == XMVectorGetX(LengthMax)));
- assert(XMVector4GreaterOrEqual(LengthMin, XMVectorZero()));
- assert(XMVector4GreaterOrEqual(LengthMax, XMVectorZero()));
- assert(XMVector4GreaterOrEqual(LengthMax, LengthMin));
- XMVECTOR LengthSq = XMVector4LengthSq(V);
- const XMVECTOR Zero = XMVectorZero();
- XMVECTOR RcpLength = XMVectorReciprocalSqrt(LengthSq);
- XMVECTOR InfiniteLength = XMVectorEqualInt(LengthSq, g_XMInfinity.v);
- XMVECTOR ZeroLength = XMVectorEqual(LengthSq, Zero);
- XMVECTOR Normal = XMVectorMultiply(V, RcpLength);
- XMVECTOR Length = XMVectorMultiply(LengthSq, RcpLength);
- XMVECTOR Select = XMVectorEqualInt(InfiniteLength, ZeroLength);
- Length = XMVectorSelect(LengthSq, Length, Select);
- Normal = XMVectorSelect(LengthSq, Normal, Select);
- XMVECTOR ControlMax = XMVectorGreater(Length, LengthMax);
- XMVECTOR ControlMin = XMVectorLess(Length, LengthMin);
- XMVECTOR ClampLength = XMVectorSelect(Length, LengthMax, ControlMax);
- ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin);
- XMVECTOR Result = XMVectorMultiply(Normal, ClampLength);
- // Preserve the original vector (with no precision loss) if the length falls within the given range
- XMVECTOR Control = XMVectorEqualInt(ControlMax, ControlMin);
- Result = XMVectorSelect(Result, V, Control);
- return Result;
- }
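- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: clamping a vector's
- // length to a band while preserving its direction; a vector whose length already lies inside
- // the band is returned unchanged. The Example* name and the [0.5, 2.0] band are arbitrary.
- inline XMVECTOR XM_CALLCONV ExampleClampLengthUsage(FXMVECTOR V)
- {
-     return XMVector4ClampLength(V, 0.5f, 2.0f);
- }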
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4Reflect
- (
- FXMVECTOR Incident,
- FXMVECTOR Normal
- )
- {
- // Result = Incident - (2 * dot(Incident, Normal)) * Normal
- XMVECTOR Result = XMVector4Dot(Incident, Normal);
- Result = XMVectorAdd(Result, Result);
- Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident);
- return Result;
- }
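- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: reflecting a ray about
- // a unit-length normal. The Example* name is hypothetical.
- inline void ExampleVector4ReflectUsage()
- {
-     XMVECTOR incident = XMVectorSet(1.f, -1.f, 0.f, 0.f);
-     XMVECTOR normal   = XMVectorSet(0.f,  1.f, 0.f, 0.f);  // must be unit length
-     XMVECTOR r = XMVector4Reflect(incident, normal);       // (1, 1, 0, 0)
-     (void)r;
- }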
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4Refract
- (
- FXMVECTOR Incident,
- FXMVECTOR Normal,
- float RefractionIndex
- )
- {
- XMVECTOR Index = XMVectorReplicate(RefractionIndex);
- return XMVector4RefractV(Incident, Normal, Index);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4RefractV
- (
- FXMVECTOR Incident,
- FXMVECTOR Normal,
- FXMVECTOR RefractionIndex
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTOR IDotN;
- XMVECTOR R;
- const XMVECTOR Zero = XMVectorZero();
- // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) +
- // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal))))
- IDotN = XMVector4Dot(Incident, Normal);
- // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
- R = XMVectorNegativeMultiplySubtract(IDotN, IDotN, g_XMOne.v);
- R = XMVectorMultiply(R, RefractionIndex);
- R = XMVectorNegativeMultiplySubtract(R, RefractionIndex, g_XMOne.v);
- if (XMVector4LessOrEqual(R, Zero))
- {
- // Total internal reflection
- return Zero;
- }
- else
- {
- XMVECTOR Result;
- // R = RefractionIndex * IDotN + sqrt(R)
- R = XMVectorSqrt(R);
- R = XMVectorMultiplyAdd(RefractionIndex, IDotN, R);
- // Result = RefractionIndex * Incident - Normal * R
- Result = XMVectorMultiply(RefractionIndex, Incident);
- Result = XMVectorNegativeMultiplySubtract(Normal, R, Result);
- return Result;
- }
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- XMVECTOR IDotN = XMVector4Dot(Incident,Normal);
- // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
- float32x4_t R = vmlsq_f32( g_XMOne, IDotN, IDotN);
- R = vmulq_f32(R, RefractionIndex);
- R = vmlsq_f32(g_XMOne, R, RefractionIndex );
- uint32x4_t vResult = vcleq_f32(R,g_XMZero);
- int8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
- vTemp = vzip_u16(vTemp.val[0], vTemp.val[1]);
- if ( vget_lane_u32(vTemp.val[1], 1) == 0xFFFFFFFFU )
- {
- // Total internal reflection
- vResult = g_XMZero;
- }
- else
- {
- // Sqrt(R)
- float32x4_t S0 = vrsqrteq_f32(R);
- float32x4_t P0 = vmulq_f32( R, S0 );
- float32x4_t R0 = vrsqrtsq_f32( P0, S0 );
- float32x4_t S1 = vmulq_f32( S0, R0 );
- float32x4_t P1 = vmulq_f32( R, S1 );
- float32x4_t R1 = vrsqrtsq_f32( P1, S1 );
- float32x4_t S2 = vmulq_f32( S1, R1 );
- R = vmulq_f32( R, S2 );
- // R = RefractionIndex * IDotN + sqrt(R)
- R = vmlaq_f32( R, RefractionIndex, IDotN );
- // Result = RefractionIndex * Incident - Normal * R
- vResult = vmulq_f32(RefractionIndex, Incident);
- vResult = vmlsq_f32( vResult, R, Normal );
- }
- return vResult;
- #elif defined(_XM_SSE_INTRINSICS_)
- XMVECTOR IDotN = XMVector4Dot(Incident,Normal);
- // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
- XMVECTOR R = _mm_mul_ps(IDotN,IDotN);
- R = _mm_sub_ps(g_XMOne,R);
- R = _mm_mul_ps(R, RefractionIndex);
- R = _mm_mul_ps(R, RefractionIndex);
- R = _mm_sub_ps(g_XMOne,R);
- XMVECTOR vResult = _mm_cmple_ps(R,g_XMZero);
- if (_mm_movemask_ps(vResult)==0x0f)
- {
- // Total internal reflection
- vResult = g_XMZero;
- }
- else
- {
- // R = RefractionIndex * IDotN + sqrt(R)
- R = _mm_sqrt_ps(R);
- vResult = _mm_mul_ps(RefractionIndex, IDotN);
- R = _mm_add_ps(R,vResult);
- // Result = RefractionIndex * Incident - Normal * R
- vResult = _mm_mul_ps(RefractionIndex, Incident);
- R = _mm_mul_ps(R,Normal);
- vResult = _mm_sub_ps(vResult,R);
- }
- return vResult;
- #endif
- }
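- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: refraction with a
- // scalar index ratio (here roughly air to glass, 1.0/1.5). Both inputs must be unit length;
- // a zero result signals total internal reflection. The Example* name is hypothetical.
- inline XMVECTOR XM_CALLCONV ExampleVector4RefractUsage(FXMVECTOR UnitIncident, FXMVECTOR UnitNormal)
- {
-     return XMVector4Refract(UnitIncident, UnitNormal, 1.0f / 1.5f);
- }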
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4Orthogonal
- (
- FXMVECTOR V
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- XMVECTORF32 Result = { { {
- V.vector4_f32[2],
- V.vector4_f32[3],
- -V.vector4_f32[0],
- -V.vector4_f32[1]
- } } };
- return Result.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- static const XMVECTORF32 Negate = { { { 1.f, 1.f, -1.f, -1.f } } };
- float32x4_t Result = vcombine_f32( vget_high_f32( V ), vget_low_f32( V ) );
- return vmulq_f32( Result, Negate );
- #elif defined(_XM_SSE_INTRINSICS_)
- static const XMVECTORF32 FlipZW = { { { 1.0f, 1.0f, -1.0f, -1.0f } } };
- XMVECTOR vResult = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,0,3,2));
- vResult = _mm_mul_ps(vResult,FlipZW);
- return vResult;
- #endif
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormalsEst
- (
- FXMVECTOR N1,
- FXMVECTOR N2
- )
- {
- XMVECTOR Result = XMVector4Dot(N1, N2);
- Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
- Result = XMVectorACosEst(Result);
- return Result;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormals
- (
- FXMVECTOR N1,
- FXMVECTOR N2
- )
- {
- XMVECTOR Result = XMVector4Dot(N1, N2);
- Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
- Result = XMVectorACos(Result);
- return Result;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4AngleBetweenVectors
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- XMVECTOR L1 = XMVector4ReciprocalLength(V1);
- XMVECTOR L2 = XMVector4ReciprocalLength(V2);
- XMVECTOR Dot = XMVector4Dot(V1, V2);
- L1 = XMVectorMultiply(L1, L2);
- XMVECTOR CosAngle = XMVectorMultiply(Dot, L1);
- CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v);
- return XMVectorACos(CosAngle);
- }
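- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: the *Vectors variant
- // does not require unit-length inputs (the *Normals variants above do). The Example* name is
- // hypothetical.
- inline float ExampleAngleBetweenUsage()
- {
-     XMVECTOR a = XMVectorSet(1.f, 0.f, 0.f, 0.f);
-     XMVECTOR b = XMVectorSet(0.f, 2.f, 0.f, 0.f);
-     // Orthogonal vectors: the replicated result is pi/2 radians.
-     return XMVectorGetX(XMVector4AngleBetweenVectors(a, b));
- }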
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV XMVector4Transform
- (
- FXMVECTOR V,
- FXMMATRIX M
- )
- {
- #if defined(_XM_NO_INTRINSICS_)
- float fX = (M.m[0][0]*V.vector4_f32[0])+(M.m[1][0]*V.vector4_f32[1])+(M.m[2][0]*V.vector4_f32[2])+(M.m[3][0]*V.vector4_f32[3]);
- float fY = (M.m[0][1]*V.vector4_f32[0])+(M.m[1][1]*V.vector4_f32[1])+(M.m[2][1]*V.vector4_f32[2])+(M.m[3][1]*V.vector4_f32[3]);
- float fZ = (M.m[0][2]*V.vector4_f32[0])+(M.m[1][2]*V.vector4_f32[1])+(M.m[2][2]*V.vector4_f32[2])+(M.m[3][2]*V.vector4_f32[3]);
- float fW = (M.m[0][3]*V.vector4_f32[0])+(M.m[1][3]*V.vector4_f32[1])+(M.m[2][3]*V.vector4_f32[2])+(M.m[3][3]*V.vector4_f32[3]);
- XMVECTORF32 vResult = { { { fX, fY, fZ, fW } } };
- return vResult.v;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- float32x2_t VL = vget_low_f32( V );
- XMVECTOR vResult = vmulq_lane_f32( M.r[0], VL, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, M.r[1], VL, 1 ); // Y
- float32x2_t VH = vget_high_f32( V );
- vResult = vmlaq_lane_f32( vResult, M.r[2], VH, 0 ); // Z
- return vmlaq_lane_f32( vResult, M.r[3], VH, 1 ); // W
- #elif defined(_XM_SSE_INTRINSICS_)
- // Splat x,y,z and w
- XMVECTOR vTempX = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTempY = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- XMVECTOR vTempZ = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- XMVECTOR vTempW = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- // Mul by the matrix
- vTempX = _mm_mul_ps(vTempX,M.r[0]);
- vTempY = _mm_mul_ps(vTempY,M.r[1]);
- vTempZ = _mm_mul_ps(vTempZ,M.r[2]);
- vTempW = _mm_mul_ps(vTempW,M.r[3]);
- // Add them all together
- vTempX = _mm_add_ps(vTempX,vTempY);
- vTempZ = _mm_add_ps(vTempZ,vTempW);
- vTempX = _mm_add_ps(vTempX,vTempZ);
- return vTempX;
- #endif
- }
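- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: DirectXMath uses the
- // row-vector convention (v' = v * M), so with w = 1 the translation row of the matrix is
- // applied. The Example* name is hypothetical.
- inline XMVECTOR ExampleVector4TransformUsage()
- {
-     XMMATRIX m = XMMatrixTranslation(10.f, 20.f, 30.f);
-     XMVECTOR p = XMVectorSet(1.f, 2.f, 3.f, 1.f);
-     return XMVector4Transform(p, m);  // (11, 22, 33, 1)
- }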
- //------------------------------------------------------------------------------
- _Use_decl_annotations_
- inline XMFLOAT4* XM_CALLCONV XMVector4TransformStream
- (
- XMFLOAT4* pOutputStream,
- size_t OutputStride,
- const XMFLOAT4* pInputStream,
- size_t InputStride,
- size_t VectorCount,
- FXMMATRIX M
- )
- {
- assert(pOutputStream != nullptr);
- assert(pInputStream != nullptr);
- assert(InputStride >= sizeof(XMFLOAT4));
- _Analysis_assume_(InputStride >= sizeof(XMFLOAT4));
- assert(OutputStride >= sizeof(XMFLOAT4));
- _Analysis_assume_(OutputStride >= sizeof(XMFLOAT4));
- #if defined(_XM_NO_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- const XMVECTOR row3 = M.r[3];
- for (size_t i = 0; i < VectorCount; i++)
- {
- XMVECTOR V = XMLoadFloat4((const XMFLOAT4*)pInputVector);
- XMVECTOR W = XMVectorSplatW(V);
- XMVECTOR Z = XMVectorSplatZ(V);
- XMVECTOR Y = XMVectorSplatY(V);
- XMVECTOR X = XMVectorSplatX(V);
- XMVECTOR Result = XMVectorMultiply(W, row3);
- Result = XMVectorMultiplyAdd(Z, row2, Result);
- Result = XMVectorMultiplyAdd(Y, row1, Result);
- Result = XMVectorMultiplyAdd(X, row0, Result);
- #ifdef _PREFAST_
- #pragma prefast(push)
- #pragma prefast(disable : 26015, "PREfast noise: Esp:1307" )
- #endif
- XMStoreFloat4((XMFLOAT4*)pOutputVector, Result);
- #ifdef _PREFAST_
- #pragma prefast(pop)
- #endif
- pInputVector += InputStride;
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_ARM_NEON_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- const XMVECTOR row3 = M.r[3];
- size_t i = 0;
- size_t four = VectorCount >> 2;
- if ( four > 0 )
- {
- if ((InputStride == sizeof(XMFLOAT4)) && (OutputStride == sizeof(XMFLOAT4)))
- {
- for (size_t j = 0; j < four; ++j)
- {
- float32x4x4_t V = vld4q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += sizeof(XMFLOAT4)*4;
- float32x2_t r = vget_low_f32( row0 );
- XMVECTOR vResult0 = vmulq_lane_f32( V.val[0], r, 0 ); // Ax
- XMVECTOR vResult1 = vmulq_lane_f32( V.val[0], r, 1 ); // Bx
- __prefetch( pInputVector );
- r = vget_high_f32( row0 );
- XMVECTOR vResult2 = vmulq_lane_f32( V.val[0], r, 0 ); // Cx
- XMVECTOR vResult3 = vmulq_lane_f32( V.val[0], r, 1 ); // Dx
- __prefetch( pInputVector+XM_CACHE_LINE_SIZE );
- r = vget_low_f32( row1 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[1], r, 0 ); // Ax+Ey
- vResult1 = vmlaq_lane_f32( vResult1, V.val[1], r, 1 ); // Bx+Fy
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*2) );
- r = vget_high_f32( row1 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[1], r, 0 ); // Cx+Gy
- vResult3 = vmlaq_lane_f32( vResult3, V.val[1], r, 1 ); // Dx+Hy
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*3) );
- r = vget_low_f32( row2 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[2], r, 0 ); // Ax+Ey+Iz
- vResult1 = vmlaq_lane_f32( vResult1, V.val[2], r, 1 ); // Bx+Fy+Jz
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*4) );
- r = vget_high_f32( row2 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[2], r, 0 ); // Cx+Gy+Kz
- vResult3 = vmlaq_lane_f32( vResult3, V.val[2], r, 1 ); // Dx+Hy+Lz
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*5) );
- r = vget_low_f32( row3 );
- vResult0 = vmlaq_lane_f32( vResult0, V.val[3], r, 0 ); // Ax+Ey+Iz+Mw
- vResult1 = vmlaq_lane_f32( vResult1, V.val[3], r, 1 ); // Bx+Fy+Jz+Nw
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*6) );
- r = vget_high_f32( row3 );
- vResult2 = vmlaq_lane_f32( vResult2, V.val[3], r, 0 ); // Cx+Gy+Kz+Ow
- vResult3 = vmlaq_lane_f32( vResult3, V.val[3], r, 1 ); // Dx+Hy+Lz+Pw
- __prefetch( pInputVector+(XM_CACHE_LINE_SIZE*7) );
- V.val[0] = vResult0;
- V.val[1] = vResult1;
- V.val[2] = vResult2;
- V.val[3] = vResult3;
- vst4q_f32( reinterpret_cast<float*>(pOutputVector), V );
- pOutputVector += sizeof(XMFLOAT4)*4;
- i += 4;
- }
- }
- }
- for (; i < VectorCount; i++)
- {
- XMVECTOR V = vld1q_f32( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += InputStride;
- float32x2_t VL = vget_low_f32( V );
- XMVECTOR vResult = vmulq_lane_f32( row0, VL, 0 ); // X
- vResult = vmlaq_lane_f32( vResult, row1, VL, 1 ); // Y
- float32x2_t VH = vget_high_f32( V );
- vResult = vmlaq_lane_f32( vResult, row2, VH, 0 ); // Z
- vResult = vmlaq_lane_f32( vResult, row3, VH, 1 ); // W
- vst1q_f32( reinterpret_cast<float*>(pOutputVector), vResult );
- pOutputVector += OutputStride;
- }
- return pOutputStream;
- #elif defined(_XM_SSE_INTRINSICS_)
- const uint8_t* pInputVector = (const uint8_t*)pInputStream;
- uint8_t* pOutputVector = (uint8_t*)pOutputStream;
- const XMVECTOR row0 = M.r[0];
- const XMVECTOR row1 = M.r[1];
- const XMVECTOR row2 = M.r[2];
- const XMVECTOR row3 = M.r[3];
- if ( !((uintptr_t)pOutputStream & 0xF) && !(OutputStride & 0xF) )
- {
- if ( !((uintptr_t)pInputStream & 0xF) && !(InputStride & 0xF) )
- {
- // Aligned input, aligned output
- for (size_t i = 0; i < VectorCount; i++)
- {
- __m128 V = _mm_load_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += InputStride;
-
- XMVECTOR vTempX = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTempY = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- XMVECTOR vTempZ = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- XMVECTOR vTempW = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- vTempX = _mm_mul_ps(vTempX,row0);
- vTempY = _mm_mul_ps(vTempY,row1);
- vTempZ = _mm_mul_ps(vTempZ,row2);
- vTempW = _mm_mul_ps(vTempW,row3);
- vTempX = _mm_add_ps(vTempX,vTempY);
- vTempZ = _mm_add_ps(vTempZ,vTempW);
- vTempX = _mm_add_ps(vTempX,vTempZ);
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTempX );
- pOutputVector += OutputStride;
- }
- }
- else
- {
- // Unaligned input, aligned output
- for (size_t i = 0; i < VectorCount; i++)
- {
- __m128 V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += InputStride;
-
- XMVECTOR vTempX = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTempY = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- XMVECTOR vTempZ = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- XMVECTOR vTempW = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- vTempX = _mm_mul_ps(vTempX,row0);
- vTempY = _mm_mul_ps(vTempY,row1);
- vTempZ = _mm_mul_ps(vTempZ,row2);
- vTempW = _mm_mul_ps(vTempW,row3);
- vTempX = _mm_add_ps(vTempX,vTempY);
- vTempZ = _mm_add_ps(vTempZ,vTempW);
- vTempX = _mm_add_ps(vTempX,vTempZ);
- XM_STREAM_PS( reinterpret_cast<float*>(pOutputVector), vTempX );
- pOutputVector += OutputStride;
- }
- }
- }
- else
- {
- if ( !((uintptr_t)pInputStream & 0xF) && !(InputStride & 0xF) )
- {
- // Aligned input, unaligned output
- for (size_t i = 0; i < VectorCount; i++)
- {
- __m128 V = _mm_load_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += InputStride;
-
- XMVECTOR vTempX = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTempY = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- XMVECTOR vTempZ = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- XMVECTOR vTempW = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- vTempX = _mm_mul_ps(vTempX,row0);
- vTempY = _mm_mul_ps(vTempY,row1);
- vTempZ = _mm_mul_ps(vTempZ,row2);
- vTempW = _mm_mul_ps(vTempW,row3);
- vTempX = _mm_add_ps(vTempX,vTempY);
- vTempZ = _mm_add_ps(vTempZ,vTempW);
- vTempX = _mm_add_ps(vTempX,vTempZ);
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTempX );
- pOutputVector += OutputStride;
- }
- }
- else
- {
- // Unaligned input, unaligned output
- for (size_t i = 0; i < VectorCount; i++)
- {
- __m128 V = _mm_loadu_ps( reinterpret_cast<const float*>(pInputVector) );
- pInputVector += InputStride;
-
- XMVECTOR vTempX = XM_PERMUTE_PS(V,_MM_SHUFFLE(0,0,0,0));
- XMVECTOR vTempY = XM_PERMUTE_PS(V,_MM_SHUFFLE(1,1,1,1));
- XMVECTOR vTempZ = XM_PERMUTE_PS(V,_MM_SHUFFLE(2,2,2,2));
- XMVECTOR vTempW = XM_PERMUTE_PS(V,_MM_SHUFFLE(3,3,3,3));
- vTempX = _mm_mul_ps(vTempX,row0);
- vTempY = _mm_mul_ps(vTempY,row1);
- vTempZ = _mm_mul_ps(vTempZ,row2);
- vTempW = _mm_mul_ps(vTempW,row3);
- vTempX = _mm_add_ps(vTempX,vTempY);
- vTempZ = _mm_add_ps(vTempZ,vTempW);
- vTempX = _mm_add_ps(vTempX,vTempZ);
- _mm_storeu_ps( reinterpret_cast<float*>(pOutputVector), vTempX );
- pOutputVector += OutputStride;
- }
- }
- }
- XM_SFENCE();
- return pOutputStream;
- #endif
- }
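- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: transforming a tightly
- // packed array of XMFLOAT4. For contiguous data the stride is simply sizeof(XMFLOAT4); a larger
- // stride lets the vectors live inside an interleaved vertex structure. The Example* name is
- // hypothetical.
- inline void XM_CALLCONV ExampleTransformStreamUsage(XMFLOAT4* pOut, const XMFLOAT4* pIn, size_t count, FXMMATRIX M)
- {
-     XMVector4TransformStream(pOut, sizeof(XMFLOAT4), pIn, sizeof(XMFLOAT4), count, M);
- }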
- /****************************************************************************
- *
- * XMVECTOR operators
- *
- ****************************************************************************/
- #ifndef _XM_NO_XMVECTOR_OVERLOADS_
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V)
- {
- return V;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV operator- (FXMVECTOR V)
- {
- return XMVectorNegate(V);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR& XM_CALLCONV operator+=
- (
- XMVECTOR& V1,
- FXMVECTOR V2
- )
- {
- V1 = XMVectorAdd(V1, V2);
- return V1;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR& XM_CALLCONV operator-=
- (
- XMVECTOR& V1,
- FXMVECTOR V2
- )
- {
- V1 = XMVectorSubtract(V1, V2);
- return V1;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR& XM_CALLCONV operator*=
- (
- XMVECTOR& V1,
- FXMVECTOR V2
- )
- {
- V1 = XMVectorMultiply(V1, V2);
- return V1;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR& XM_CALLCONV operator/=
- (
- XMVECTOR& V1,
- FXMVECTOR V2
- )
- {
- V1 = XMVectorDivide(V1,V2);
- return V1;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR& operator*=
- (
- XMVECTOR& V,
- const float S
- )
- {
- V = XMVectorScale(V, S);
- return V;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR& operator/=
- (
- XMVECTOR& V,
- const float S
- )
- {
- XMVECTOR vS = XMVectorReplicate( S );
- V = XMVectorDivide(V, vS);
- return V;
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV operator+
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- return XMVectorAdd(V1, V2);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV operator-
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- return XMVectorSubtract(V1, V2);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV operator*
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- return XMVectorMultiply(V1, V2);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV operator/
- (
- FXMVECTOR V1,
- FXMVECTOR V2
- )
- {
- return XMVectorDivide(V1,V2);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV operator*
- (
- FXMVECTOR V,
- const float S
- )
- {
- return XMVectorScale(V, S);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV operator/
- (
- FXMVECTOR V,
- const float S
- )
- {
- XMVECTOR vS = XMVectorReplicate( S );
- return XMVectorDivide(V, vS);
- }
- //------------------------------------------------------------------------------
- inline XMVECTOR XM_CALLCONV operator*
- (
- float S,
- FXMVECTOR V
- )
- {
- return XMVectorScale(V, S);
- }
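- //------------------------------------------------------------------------------
- // [Editor's note] Illustrative sketch, not part of the original header: the overloads above allow
- // natural expression syntax, with each operator forwarding to the corresponding XMVector* function.
- // It is placed inside the _XM_NO_XMVECTOR_OVERLOADS_ guard so it only exists when the operators do.
- // The Example* name is hypothetical.
- inline XMVECTOR XM_CALLCONV ExampleOperatorUsage(FXMVECTOR a, FXMVECTOR b)
- {
-     // Equivalent to XMVectorScale(XMVectorAdd(a, b), 0.5f) and XMVectorDivide(b, XMVectorReplicate(2.0f)).
-     XMVECTOR mid = (a + b) * 0.5f;
-     return mid - b / 2.0f;
- }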
- #endif /* !_XM_NO_XMVECTOR_OVERLOADS_ */
- #if defined(_XM_NO_INTRINSICS_)
- #undef XMISNAN
- #undef XMISINF
- #endif
- #if defined(_XM_SSE_INTRINSICS_)
- #undef XM3UNPACK3INTO4
- #undef XM3PACK4INTO3
- #endif
|