FastWindingNumberForSoups.h (272 KB)
  1. // This header created by issuing: `echo "// This header created by issuing: \`$BASH_COMMAND\` $(echo "" | cat - LICENSE README.md | sed -e "s#^..*#\/\/ &#") $(echo "" | cat - SYS_Types.h SYS_Math.h VM_SSEFunc.h VM_SIMDFunc.h VM_SIMD.h UT_Array.h UT_ArrayImpl.h UT_SmallArray.h UT_FixedVector.h UT_ParallelUtil.h UT_BVH.h UT_BVHImpl.h UT_SolidAngle.h UT_Array.cpp UT_SolidAngle.cpp | sed -e "s/^#.*include *\".*$//g")" > ~/Repos/libigl/include/igl/FastWindingNumberForSoups.h`
  2. // MIT License
  3. // Copyright (c) 2018 Side Effects Software Inc.
  4. // Permission is hereby granted, free of charge, to any person obtaining a copy
  5. // of this software and associated documentation files (the "Software"), to deal
  6. // in the Software without restriction, including without limitation the rights
  7. // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  8. // copies of the Software, and to permit persons to whom the Software is
  9. // furnished to do so, subject to the following conditions:
  10. // The above copyright notice and this permission notice shall be included in all
  11. // copies or substantial portions of the Software.
  12. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  13. // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  14. // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  15. // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  16. // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  17. // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  18. // SOFTWARE.
  19. // # Fast Winding Numbers for Soups
  20. // https://github.com/alecjacobson/WindingNumber
  21. // Implementation of the _ACM SIGGRAPH_ 2018 paper,
  22. // "Fast Winding Numbers for Soups and Clouds"
  23. // Gavin Barill¹, Neil Dickson², Ryan Schmidt³, David I.W. Levin¹, Alec Jacobson¹
  24. // ¹University of Toronto, ²SideFX, ³Gradient Space
  25. // _Note: this implementation is for triangle soups only, not point clouds._
  26. // This version does _not_ depend on Intel TBB. Instead it depends on
  27. // [libigl](https://github.com/libigl/libigl)'s simpler `igl::parallel_for` (which
  28. // uses `std::thread`)
  29. // <del>This code, as written, depends on Intel's Threading Building Blocks (TBB) library for parallelism, but it should be fairly easy to change it to use any other means of threading, since it only uses parallel for loops with simple partitioning.</del>
  30. // The main class of interest is UT_SolidAngle and its init and computeSolidAngle functions, which you can use by including UT_SolidAngle.h, and whose implementation is mostly in UT_SolidAngle.cpp, using a 4-way bounding volume hierarchy (BVH) implemented in the UT_BVH.h and UT_BVHImpl.h headers. The rest of the files are mostly various supporting code. UT_SubtendedAngle, for computing angles subtended by 2D curves, can also be found in UT_SolidAngle.h and UT_SolidAngle.cpp .
  31. // An example of very similar code and how to use it to create a geometry operator (SOP) in Houdini can be found in the HDK examples (toolkit/samples/SOP/SOP_WindingNumber) for Houdini 16.5.121 and later. Query points go in the first input and the mesh geometry goes in the second input.
  32. // Create a single header using:
  33. // echo "// This header created by issuing: \`$BASH_COMMAND\` $(echo "" | cat - LICENSE README.md | sed -e "s#^..*#\/\/ &#") $(echo "" | cat - SYS_Types.h SYS_Math.h VM_SSEFunc.h VM_SIMD.h UT_Array.h UT_ArrayImpl.h UT_SmallArray.h UT_FixedVector.h UT_ParallelUtil.h UT_BVH.h UT_BVHImpl.h UT_SolidAngle.h UT_Array.cpp UT_SolidAngle.cpp | sed -e "s/^#.*include *\".*$//g")"
  34. /*
  35. * Copyright (c) 2018 Side Effects Software Inc.
  36. *
  37. * Permission is hereby granted, free of charge, to any person obtaining a copy
  38. * of this software and associated documentation files (the "Software"), to deal
  39. * in the Software without restriction, including without limitation the rights
  40. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  41. * copies of the Software, and to permit persons to whom the Software is
  42. * furnished to do so, subject to the following conditions:
  43. *
  44. * The above copyright notice and this permission notice shall be included in all
  45. * copies or substantial portions of the Software.
  46. *
  47. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  48. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  49. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  50. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  51. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  52. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  53. * SOFTWARE.
  54. *
  55. * COMMENTS:
  56. * Common type definitions.
  57. */
  58. #pragma once
  59. #ifndef __SYS_Types__
  60. #define __SYS_Types__
  61. /* Include system types */
  62. #include <limits>
  63. #include <type_traits>
  64. #include <sys/types.h>
  65. #include <stdint.h>
  66. namespace igl {
  67. /// @private
  68. namespace FastWindingNumber {
  69. /*
  70. * Integer types
  71. */
/*
 * Integer types
 */
typedef signed char int8;
typedef unsigned char uint8;
typedef short int16;
typedef unsigned short uint16;
typedef int int32;
typedef unsigned int uint32;
#ifndef MBSD
typedef unsigned int uint;
#endif
/*
 * Avoid using uint64.
 * The extra bit of precision is NOT worth the cost in pain and suffering
 * induced by use of unsigned.
 */
#if defined(_WIN32)
typedef __int64 int64;
typedef unsigned __int64 uint64;
#elif defined(MBSD)
// On MBSD, int64/uint64 are also defined in the system headers so we must
// declare these in the same way or else we get conflicts.
typedef int64_t int64;
typedef uint64_t uint64;
#elif defined(AMD64)
typedef long int64;
typedef unsigned long uint64;
#else
typedef long long int64;
typedef unsigned long long uint64;
#endif
/// The problem with int64 is that it implies that it is a fixed 64-bit quantity
/// that is saved to disk. Therefore, we need another integral type for
/// indexing our arrays.
typedef int64 exint;
/// Mark function to be inlined. If this is done, taking the address of such
/// a function is not allowed.
#if defined(__GNUC__) || defined(__clang__)
#define SYS_FORCE_INLINE __attribute__ ((always_inline)) inline
#elif defined(_MSC_VER)
#define SYS_FORCE_INLINE __forceinline
#else
// Fall back to a plain inline hint on compilers without a force-inline spelling.
#define SYS_FORCE_INLINE inline
#endif
/// Floating Point Types
typedef float fpreal32;
typedef double fpreal64;
/// SYS_FPRealUnionT for type-safe casting with integral types
template <typename T>
union SYS_FPRealUnionT;

/// Specialization for 32-bit floats: overlays signed-int, unsigned-int and
/// float views of the same 32 bits, following the IEEE-754 single-precision
/// layout (1 sign bit, 8 exponent bits, 23 mantissa bits).
template <>
union SYS_FPRealUnionT<fpreal32>
{
    typedef int32 int_type;
    typedef uint32 uint_type;
    typedef fpreal32 fpreal_type;
    enum {
    EXPONENT_BITS = 8,
    MANTISSA_BITS = 23,
    EXPONENT_BIAS = 127 };
    int_type ival;   // the bits viewed as a signed integer
    uint_type uval;  // the bits viewed as an unsigned integer
    fpreal_type fval; // the bits viewed as a float
    // NOTE(review): bit-field packing order is implementation-defined; this
    // member order assumes the little-endian layout the supported compilers
    // produce — confirm before porting to a new toolchain.
    struct
    {
        uint_type mantissa_val: 23;
        uint_type exponent_val: 8;
        uint_type sign_val: 1;
    };
};
/// Specialization for 64-bit floats: overlays signed-int, unsigned-int and
/// double views of the same 64 bits, following the IEEE-754 double-precision
/// layout (1 sign bit, 11 exponent bits, 52 mantissa bits).
template <>
union SYS_FPRealUnionT<fpreal64>
{
    typedef int64 int_type;
    typedef uint64 uint_type;
    typedef fpreal64 fpreal_type;
    enum {
    EXPONENT_BITS = 11,
    MANTISSA_BITS = 52,
    EXPONENT_BIAS = 1023 };
    int_type ival;   // the bits viewed as a signed integer
    uint_type uval;  // the bits viewed as an unsigned integer
    fpreal_type fval; // the bits viewed as a double
    // NOTE(review): bit-field packing order is implementation-defined; this
    // member order assumes the little-endian layout the supported compilers
    // produce — confirm before porting to a new toolchain.
    struct
    {
        uint_type mantissa_val: 52;
        uint_type exponent_val: 11;
        uint_type sign_val: 1;
    };
};
/// Convenience aliases for the two specializations.
typedef union SYS_FPRealUnionT<fpreal32> SYS_FPRealUnionF;
typedef union SYS_FPRealUnionT<fpreal64> SYS_FPRealUnionD;
  162. /// Asserts are disabled
  163. /// @{
  164. #define UT_ASSERT_P(ZZ) ((void)0)
  165. #define UT_ASSERT(ZZ) ((void)0)
  166. #define UT_ASSERT_MSG_P(ZZ, MM) ((void)0)
  167. #define UT_ASSERT_MSG(ZZ, MM) ((void)0)
  168. /// @}
  169. }}
  170. #endif
  171. /*
  172. * Copyright (c) 2018 Side Effects Software Inc.
  173. *
  174. * Permission is hereby granted, free of charge, to any person obtaining a copy
  175. * of this software and associated documentation files (the "Software"), to deal
  176. * in the Software without restriction, including without limitation the rights
  177. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  178. * copies of the Software, and to permit persons to whom the Software is
  179. * furnished to do so, subject to the following conditions:
  180. *
  181. * The above copyright notice and this permission notice shall be included in all
  182. * copies or substantial portions of the Software.
  183. *
  184. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  185. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  186. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  187. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  188. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  189. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  190. * SOFTWARE.
  191. *
  192. * COMMENTS:
  193. * Miscellaneous math functions.
  194. */
  195. #pragma once
  196. #ifndef __SYS_Math__
  197. #define __SYS_Math__
  198. #include <float.h>
  199. #include <limits>
  200. #include <math.h>
  201. namespace igl {
  202. /// @private
  203. namespace FastWindingNumber {
  204. // NOTE:
  205. // These have been carefully written so that in the case of equality
  206. // we always return the first parameter. This is so that NANs in
  207. // in the second parameter are suppressed.
  208. #define h_min(a, b) (((a) > (b)) ? (b) : (a))
  209. #define h_max(a, b) (((a) < (b)) ? (b) : (a))
  210. // DO NOT CHANGE THE ABOVE WITHOUT READING THE COMMENT
  211. #define h_abs(a) (((a) > 0) ? (a) : -(a))
// Per-type min/max/abs overloads expanding the h_min/h_max/h_abs macros
// defined just above. As documented there, equal operands return the FIRST
// argument, so a NaN in the second argument is suppressed — do not "simplify"
// the comparisons.
static constexpr inline int16 SYSmin(int16 a, int16 b) { return h_min(a,b); }
static constexpr inline int16 SYSmax(int16 a, int16 b) { return h_max(a,b); }
static constexpr inline int16 SYSabs(int16 a) { return h_abs(a); }
static constexpr inline int32 SYSmin(int32 a, int32 b) { return h_min(a,b); }
static constexpr inline int32 SYSmax(int32 a, int32 b) { return h_max(a,b); }
static constexpr inline int32 SYSabs(int32 a) { return h_abs(a); }
static constexpr inline int64 SYSmin(int64 a, int64 b) { return h_min(a,b); }
static constexpr inline int64 SYSmax(int64 a, int64 b) { return h_max(a,b); }
// Mixed int32/int64 overloads avoid ambiguous-overload errors at call sites
// that combine the two widths; the result is widened to int64.
static constexpr inline int64 SYSmin(int32 a, int64 b) { return h_min(a,b); }
static constexpr inline int64 SYSmax(int32 a, int64 b) { return h_max(a,b); }
static constexpr inline int64 SYSmin(int64 a, int32 b) { return h_min(a,b); }
static constexpr inline int64 SYSmax(int64 a, int32 b) { return h_max(a,b); }
static constexpr inline int64 SYSabs(int64 a) { return h_abs(a); }
static constexpr inline uint16 SYSmin(uint16 a, uint16 b) { return h_min(a,b); }
static constexpr inline uint16 SYSmax(uint16 a, uint16 b) { return h_max(a,b); }
static constexpr inline uint32 SYSmin(uint32 a, uint32 b) { return h_min(a,b); }
static constexpr inline uint32 SYSmax(uint32 a, uint32 b) { return h_max(a,b); }
static constexpr inline uint64 SYSmin(uint64 a, uint64 b) { return h_min(a,b); }
static constexpr inline uint64 SYSmax(uint64 a, uint64 b) { return h_max(a,b); }
static constexpr inline fpreal32 SYSmin(fpreal32 a, fpreal32 b) { return h_min(a,b); }
static constexpr inline fpreal32 SYSmax(fpreal32 a, fpreal32 b) { return h_max(a,b); }
static constexpr inline fpreal64 SYSmin(fpreal64 a, fpreal64 b) { return h_min(a,b); }
static constexpr inline fpreal64 SYSmax(fpreal64 a, fpreal64 b) { return h_max(a,b); }
// Some systems have size_t as a separate type from uint. Some don't.
  236. #if (defined(LINUX) && defined(IA64)) || defined(MBSD)
  237. static constexpr inline size_t SYSmin(size_t a, size_t b) { return h_min(a,b); }
  238. static constexpr inline size_t SYSmax(size_t a, size_t b) { return h_max(a,b); }
  239. #endif
  240. #undef h_min
  241. #undef h_max
  242. #undef h_abs
  243. #define h_clamp(val, min, max, tol) \
  244. ((val <= min+tol) ? min : ((val >= max-tol) ? max : val))
// SYSclamp(v, min, max [, tol]): clamp v into [min, max]. The floating-point
// overloads take an optional tolerance: values within tol of a bound snap
// exactly onto that bound, which the integer overloads get with tol == 0.
static constexpr inline int
SYSclamp(int v, int min, int max)
{ return h_clamp(v, min, max, 0); }
static constexpr inline uint
SYSclamp(uint v, uint min, uint max)
{ return h_clamp(v, min, max, 0); }
static constexpr inline int64
SYSclamp(int64 v, int64 min, int64 max)
{ return h_clamp(v, min, max, int64(0)); }
static constexpr inline uint64
SYSclamp(uint64 v, uint64 min, uint64 max)
{ return h_clamp(v, min, max, uint64(0)); }
static constexpr inline fpreal32
SYSclamp(fpreal32 v, fpreal32 min, fpreal32 max, fpreal32 tol=(fpreal32)0)
{ return h_clamp(v, min, max, tol); }
static constexpr inline fpreal64
SYSclamp(fpreal64 v, fpreal64 min, fpreal64 max, fpreal64 tol=(fpreal64)0)
{ return h_clamp(v, min, max, tol); }
  263. #undef h_clamp
  264. static inline fpreal64 SYSsqrt(fpreal64 arg)
  265. { return ::sqrt(arg); }
  266. static inline fpreal32 SYSsqrt(fpreal32 arg)
  267. { return ::sqrtf(arg); }
  268. static inline fpreal64 SYSatan2(fpreal64 a, fpreal64 b)
  269. { return ::atan2(a, b); }
  270. static inline fpreal32 SYSatan2(fpreal32 a, fpreal32 b)
  271. { return ::atan2(a, b); }
  272. static inline fpreal32 SYSabs(fpreal32 a) { return ::fabsf(a); }
  273. static inline fpreal64 SYSabs(fpreal64 a) { return ::fabs(a); }
  274. }}
  275. #endif
  276. /*
  277. * Copyright (c) 2018 Side Effects Software Inc.
  278. *
  279. * Permission is hereby granted, free of charge, to any person obtaining a copy
  280. * of this software and associated documentation files (the "Software"), to deal
  281. * in the Software without restriction, including without limitation the rights
  282. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  283. * copies of the Software, and to permit persons to whom the Software is
  284. * furnished to do so, subject to the following conditions:
  285. *
  286. * The above copyright notice and this permission notice shall be included in all
  287. * copies or substantial portions of the Software.
  288. *
  289. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  290. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  291. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  292. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  293. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  294. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  295. * SOFTWARE.
  296. *
  297. * COMMENTS:
  298. * SIMD wrapper functions for SSE instructions
  299. */
  300. #pragma once
  301. #ifdef __SSE__
  302. #ifndef __VM_SSEFunc__
  303. #define __VM_SSEFunc__
  304. #if defined(_MSC_VER)
  305. #pragma warning(push)
  306. #pragma warning(disable:4799)
  307. #endif
  308. #define CPU_HAS_SIMD_INSTR 1
  309. #define VM_SSE_STYLE 1
  310. #include <emmintrin.h>
  311. #if defined(__SSE4_1__)
  312. #define VM_SSE41_STYLE 1
  313. #include <smmintrin.h>
  314. #endif
  315. #if defined(_MSC_VER)
  316. #pragma warning(pop)
  317. #endif
  318. namespace igl {
  319. /// @private
  320. namespace FastWindingNumber {
  321. typedef __m128 v4sf;
  322. typedef __m128i v4si;
  323. // Plain casting (no conversion)
  324. // MSVC has problems casting between __m128 and __m128i, so we implement a
  325. // custom casting routine specifically for windows.
  326. #if defined(_MSC_VER)
// MSVC-safe bit reinterpretation between __m128i and __m128: write one
// anonymous-union member, read the other. (GCC/Clang take the plain vector
// cast in the #else branch instead.)
static SYS_FORCE_INLINE v4sf
vm_v4sf(const v4si &a)
{
    union {
        v4si ival;
        v4sf fval;
    };
    ival = a;
    return fval;
}
static SYS_FORCE_INLINE v4si
vm_v4si(const v4sf &a)
{
    union {
        v4si ival;
        v4sf fval;
    };
    fval = a;
    return ival;
}
  347. #define V4SF(A) vm_v4sf(A)
  348. #define V4SI(A) vm_v4si(A)
  349. #else
  350. #define V4SF(A) (v4sf)A
  351. #define V4SI(A) (v4si)A
  352. #endif
  353. #define VM_SHUFFLE_MASK(a0,a1, b0,b1) ((b1)<<6|(b0)<<4 | (a1)<<2|(a0))
// vm_shuffle: typed wrappers over _mm_shuffle_ps. The shuffle mask is an
// immediate operand of the instruction, hence the template (compile-time)
// parameter rather than a runtime argument.
template <int mask>
static SYS_FORCE_INLINE v4sf
vm_shuffle(const v4sf &a, const v4sf &b)
{
    return _mm_shuffle_ps(a, b, mask);
}
// Integer lanes: reuse the float shuffle via bit-casts (no conversion).
template <int mask>
static SYS_FORCE_INLINE v4si
vm_shuffle(const v4si &a, const v4si &b)
{
    return V4SI(_mm_shuffle_ps(V4SF(a), V4SF(b), mask));
}
// Convenience form taking the four lane selectors A,B,C,D directly.
template <int A, int B, int C, int D, typename T>
static SYS_FORCE_INLINE T
vm_shuffle(const T &a, const T &b)
{
    return vm_shuffle<VM_SHUFFLE_MASK(A,B,C,D)>(a, b);
}
// Single-operand forms: shuffle a vector with itself.
template <int mask, typename T>
static SYS_FORCE_INLINE T
vm_shuffle(const T &a)
{
    return vm_shuffle<mask>(a, a);
}
template <int A, int B, int C, int D, typename T>
static SYS_FORCE_INLINE T
vm_shuffle(const T &a)
{
    return vm_shuffle<A,B,C,D>(a, a);
}
  384. #if defined(VM_SSE41_STYLE)
// SSE4.1 lane insert/extract. The intrinsics require the lane index as an
// immediate, so a runtime index is dispatched through a switch.
static SYS_FORCE_INLINE v4si
vm_insert(const v4si v, int32 a, int n)
{
    switch (n)
    {
        case 0: return _mm_insert_epi32(v, a, 0);
        case 1: return _mm_insert_epi32(v, a, 1);
        case 2: return _mm_insert_epi32(v, a, 2);
        case 3: return _mm_insert_epi32(v, a, 3);
    }
    // Out-of-range lane: return the input unchanged.
    return v;
}
static SYS_FORCE_INLINE v4sf
vm_insert(const v4sf v, float a, int n)
{
    switch (n)
    {
        case 0: return _mm_insert_ps(v, _mm_set_ss(a), _MM_MK_INSERTPS_NDX(0,0,0));
        case 1: return _mm_insert_ps(v, _mm_set_ss(a), _MM_MK_INSERTPS_NDX(0,1,0));
        case 2: return _mm_insert_ps(v, _mm_set_ss(a), _MM_MK_INSERTPS_NDX(0,2,0));
        case 3: return _mm_insert_ps(v, _mm_set_ss(a), _MM_MK_INSERTPS_NDX(0,3,0));
    }
    return v;
}
static SYS_FORCE_INLINE int
vm_extract(const v4si v, int n)
{
    switch (n)
    {
        case 0: return _mm_extract_epi32(v, 0);
        case 1: return _mm_extract_epi32(v, 1);
        case 2: return _mm_extract_epi32(v, 2);
        case 3: return _mm_extract_epi32(v, 3);
    }
    return 0;
}
static SYS_FORCE_INLINE float
vm_extract(const v4sf v, int n)
{
    // _mm_extract_ps returns the raw bits as an int; the union converts the
    // bit pattern back to float without a value conversion.
    SYS_FPRealUnionF tmp;
    switch (n)
    {
        case 0: tmp.ival = _mm_extract_ps(v, 0); break;
        case 1: tmp.ival = _mm_extract_ps(v, 1); break;
        case 2: tmp.ival = _mm_extract_ps(v, 2); break;
        case 3: tmp.ival = _mm_extract_ps(v, 3); break;
    }
    // NOTE(review): n outside [0,3] leaves tmp uninitialized — callers must
    // pass a valid lane index.
    return tmp.fval;
}
  434. #else
// Pre-SSE4.1 fallback for lane insert/extract: spill the vector into an
// array view through an anonymous union and index it directly.
static SYS_FORCE_INLINE v4si
vm_insert(const v4si v, int32 a, int n)
{
    union { v4si vector; int32 comp[4]; };
    vector = v;
    comp[n] = a;
    return vector;
}
static SYS_FORCE_INLINE v4sf
vm_insert(const v4sf v, float a, int n)
{
    union { v4sf vector; float comp[4]; };
    vector = v;
    comp[n] = a;
    return vector;
}
static SYS_FORCE_INLINE int
vm_extract(const v4si v, int n)
{
    union { v4si vector; int32 comp[4]; };
    vector = v;
    return comp[n];
}
static SYS_FORCE_INLINE float
vm_extract(const v4sf v, int n)
{
    union { v4sf vector; float comp[4]; };
    vector = v;
    return comp[n];
}
  465. #endif
// Broadcast / per-lane constructors.
static SYS_FORCE_INLINE v4sf
vm_splats(float a)
{
    return _mm_set1_ps(a);
}
// Integer splats route the bit pattern through a float union so the same
// _mm_set1_ps path can be reused, then bit-cast back to integer lanes —
// only the raw bits matter, no numeric conversion happens.
static SYS_FORCE_INLINE v4si
vm_splats(uint32 a)
{
    SYS_FPRealUnionF tmp;
    tmp.uval = a;
    return V4SI(vm_splats(tmp.fval));
}
static SYS_FORCE_INLINE v4si
vm_splats(int32 a)
{
    SYS_FPRealUnionF tmp;
    tmp.ival = a;
    return V4SI(vm_splats(tmp.fval));
}
// Build (a,b,c,d) from four scalars using two levels of shuffles over
// single-lane loads.
static SYS_FORCE_INLINE v4sf
vm_splats(float a, float b, float c, float d)
{
    return vm_shuffle<0,2,0,2>(
        vm_shuffle<0>(_mm_set_ss(a), _mm_set_ss(b)),
        vm_shuffle<0>(_mm_set_ss(c), _mm_set_ss(d)));
}
static SYS_FORCE_INLINE v4si
vm_splats(uint32 a, uint32 b, uint32 c, uint32 d)
{
    SYS_FPRealUnionF af, bf, cf, df;
    af.uval = a;
    bf.uval = b;
    cf.uval = c;
    df.uval = d;
    return V4SI(vm_splats(af.fval, bf.fval, cf.fval, df.fval));
}
static SYS_FORCE_INLINE v4si
vm_splats(int32 a, int32 b, int32 c, int32 d)
{
    SYS_FPRealUnionF af, bf, cf, df;
    af.ival = a;
    bf.ival = b;
    cf.ival = c;
    df.ival = d;
    return V4SI(vm_splats(af.fval, bf.fval, cf.fval, df.fval));
}
// Unaligned 4-lane loads and store (loadu/storeu — no alignment required).
static SYS_FORCE_INLINE v4si
vm_load(const int32 v[4])
{
    // Load as float lanes, then bit-cast: only the raw bits are moved.
    return V4SI(_mm_loadu_ps((const float *)v));
}
static SYS_FORCE_INLINE v4sf
vm_load(const float v[4])
{
    return _mm_loadu_ps(v);
}
static SYS_FORCE_INLINE void
vm_store(float dst[4], v4sf value)
{
    _mm_storeu_ps(dst, value);
}
// Negate via 0 - a (avoids needing a sign-bit mask constant).
static SYS_FORCE_INLINE v4sf
vm_negate(v4sf a)
{
    return _mm_sub_ps(_mm_setzero_ps(), a);
}
// Absolute value as max(a, -a).
static SYS_FORCE_INLINE v4sf
vm_abs(v4sf a)
{
    return _mm_max_ps(a, vm_negate(a));
}
// Fast approximate divide: a * rcp(b). _mm_rcp_ps is an approximation
// (roughly half-precision accuracy), trading accuracy for speed versus
// VM_DIV's exact _mm_div_ps.
static SYS_FORCE_INLINE v4sf
vm_fdiv(v4sf a, v4sf b)
{
    return _mm_rcp_ps(_mm_rsqrt_ps(a)) == a ? a : _mm_mul_ps(a, _mm_rcp_ps(b));
}
// Fast approximate square root: rcp(rsqrt(a)); approximate like vm_fdiv.
static SYS_FORCE_INLINE v4sf
vm_fsqrt(v4sf a)
{
    return _mm_rcp_ps(_mm_rsqrt_ps(a));
}
// a*b + c per lane (separate mul/add; not a fused FMA instruction).
static SYS_FORCE_INLINE v4sf
vm_madd(v4sf a, v4sf b, v4sf c)
{
    return _mm_add_ps(_mm_mul_ps(a, b), c);
}
// All four lanes set to all-ones; the canonical "true" mask.
static const v4si theSSETrue = vm_splats(0xFFFFFFFF);
// True iff every lane of `a` equals the all-ones mask.
static SYS_FORCE_INLINE bool
vm_allbits(const v4si &a)
{
    return _mm_movemask_ps(V4SF(_mm_cmpeq_epi32(a, theSSETrue))) == 0xF;
}
  558. #define VM_EXTRACT vm_extract
  559. #define VM_INSERT vm_insert
  560. #define VM_SPLATS vm_splats
  561. #define VM_LOAD vm_load
  562. #define VM_STORE vm_store
  563. #define VM_CMPLT(A,B) V4SI(_mm_cmplt_ps(A,B))
  564. #define VM_CMPLE(A,B) V4SI(_mm_cmple_ps(A,B))
  565. #define VM_CMPGT(A,B) V4SI(_mm_cmpgt_ps(A,B))
  566. #define VM_CMPGE(A,B) V4SI(_mm_cmpge_ps(A,B))
  567. #define VM_CMPEQ(A,B) V4SI(_mm_cmpeq_ps(A,B))
  568. #define VM_CMPNE(A,B) V4SI(_mm_cmpneq_ps(A,B))
  569. #define VM_ICMPLT _mm_cmplt_epi32
  570. #define VM_ICMPGT _mm_cmpgt_epi32
  571. #define VM_ICMPEQ _mm_cmpeq_epi32
  572. #define VM_IADD _mm_add_epi32
  573. #define VM_ISUB _mm_sub_epi32
  574. #define VM_ADD _mm_add_ps
  575. #define VM_SUB _mm_sub_ps
  576. #define VM_MUL _mm_mul_ps
  577. #define VM_DIV _mm_div_ps
  578. #define VM_SQRT _mm_sqrt_ps
  579. #define VM_ISQRT _mm_rsqrt_ps
  580. #define VM_INVERT _mm_rcp_ps
  581. #define VM_ABS vm_abs
  582. #define VM_FDIV vm_fdiv
  583. #define VM_NEG vm_negate
  584. #define VM_FSQRT vm_fsqrt
  585. #define VM_MADD vm_madd
  586. #define VM_MIN _mm_min_ps
  587. #define VM_MAX _mm_max_ps
  588. #define VM_AND _mm_and_si128
  589. #define VM_ANDNOT _mm_andnot_si128
  590. #define VM_OR _mm_or_si128
  591. #define VM_XOR _mm_xor_si128
  592. #define VM_ALLBITS vm_allbits
  593. #define VM_SHUFFLE vm_shuffle
  594. // Integer to float conversions
  595. #define VM_SSE_ROUND_MASK 0x6000
  596. #define VM_SSE_ROUND_ZERO 0x6000
  597. #define VM_SSE_ROUND_UP 0x4000
  598. #define VM_SSE_ROUND_DOWN 0x2000
  599. #define VM_SSE_ROUND_NEAR 0x0000
  600. #define GETROUND() (_mm_getcsr()&VM_SSE_ROUND_MASK)
  601. #define SETROUND(x) (_mm_setcsr(x|(_mm_getcsr()&~VM_SSE_ROUND_MASK)))
  602. // The P functions must be invoked before FLOOR, the E functions invoked
  603. // afterwards to reset the state.
  604. #define VM_P_FLOOR() uint rounding = GETROUND(); \
  605. SETROUND(VM_SSE_ROUND_DOWN);
  606. #define VM_FLOOR _mm_cvtps_epi32
  607. #define VM_INT _mm_cvttps_epi32
  608. #define VM_E_FLOOR() SETROUND(rounding);
  609. // Float to integer conversion
  610. #define VM_IFLOAT _mm_cvtepi32_ps
  611. }}
  612. #endif
  613. #endif
  614. #pragma once
  615. #ifndef __SSE__
  616. #ifndef __VM_SIMDFunc__
  617. #define __VM_SIMDFunc__
#include <cmath>
#include <cstring>
  619. namespace igl {
  620. /// @private
  621. namespace FastWindingNumber {
// Scalar (non-SSE) 4-lane vector types used by the fallback implementations
// below: layout-compatible stand-ins for __m128i / __m128.
struct v4si {
    int32 v[4];
};
struct v4sf {
    float v[4];
};
  628. static SYS_FORCE_INLINE v4sf V4SF(const v4si &v) {
  629. static_assert(sizeof(v4si) == sizeof(v4sf) && alignof(v4si) == alignof(v4sf), "v4si and v4sf must be compatible");
  630. return *(const v4sf*)&v;
  631. }
  632. static SYS_FORCE_INLINE v4si V4SI(const v4sf &v) {
  633. static_assert(sizeof(v4si) == sizeof(v4sf) && alignof(v4si) == alignof(v4sf), "v4si and v4sf must be compatible");
  634. return *(const v4si*)&v;
  635. }
  636. static SYS_FORCE_INLINE int32 conditionMask(bool c) {
  637. return c ? int32(0xFFFFFFFF) : 0;
  638. }
// Scalar counterparts of the SSE splat/load helpers. Unsigned lanes are
// stored through int32 casts — only the bit patterns matter.
static SYS_FORCE_INLINE v4sf
VM_SPLATS(float f) {
    return v4sf{{f, f, f, f}};
}
static SYS_FORCE_INLINE v4si
VM_SPLATS(uint32 i) {
    return v4si{{int32(i), int32(i), int32(i), int32(i)}};
}
static SYS_FORCE_INLINE v4si
VM_SPLATS(int32 i) {
    return v4si{{i, i, i, i}};
}
static SYS_FORCE_INLINE v4sf
VM_SPLATS(float a, float b, float c, float d) {
    return v4sf{{a, b, c, d}};
}
static SYS_FORCE_INLINE v4si
VM_SPLATS(uint32 a, uint32 b, uint32 c, uint32 d) {
    return v4si{{int32(a), int32(b), int32(c), int32(d)}};
}
static SYS_FORCE_INLINE v4si
VM_SPLATS(int32 a, int32 b, int32 c, int32 d) {
    return v4si{{a, b, c, d}};
}
// Load four consecutive elements (no alignment requirement).
static SYS_FORCE_INLINE v4si
VM_LOAD(const int32 v[4]) {
    return v4si{{v[0], v[1], v[2], v[3]}};
}
static SYS_FORCE_INLINE v4sf
VM_LOAD(const float v[4]) {
    return v4sf{{v[0], v[1], v[2], v[3]}};
}
// Per-lane integer comparisons: each lane becomes an all-ones mask when the
// predicate holds, zero otherwise (same convention as the SSE intrinsics).
static inline v4si VM_ICMPEQ(v4si a, v4si b) {
    return v4si{{ conditionMask(a.v[0] == b.v[0]), conditionMask(a.v[1] == b.v[1]),
                  conditionMask(a.v[2] == b.v[2]), conditionMask(a.v[3] == b.v[3]) }};
}
static inline v4si VM_ICMPGT(v4si a, v4si b) {
    return v4si{{ conditionMask(a.v[0] > b.v[0]), conditionMask(a.v[1] > b.v[1]),
                  conditionMask(a.v[2] > b.v[2]), conditionMask(a.v[3] > b.v[3]) }};
}
static inline v4si VM_ICMPLT(v4si a, v4si b) {
    return v4si{{ conditionMask(a.v[0] < b.v[0]), conditionMask(a.v[1] < b.v[1]),
                  conditionMask(a.v[2] < b.v[2]), conditionMask(a.v[3] < b.v[3]) }};
}
// Per-lane integer arithmetic.
static inline v4si VM_IADD(v4si a, v4si b) {
    return v4si{{ (a.v[0] + b.v[0]), (a.v[1] + b.v[1]),
                  (a.v[2] + b.v[2]), (a.v[3] + b.v[3]) }};
}
static inline v4si VM_ISUB(v4si a, v4si b) {
    return v4si{{ (a.v[0] - b.v[0]), (a.v[1] - b.v[1]),
                  (a.v[2] - b.v[2]), (a.v[3] - b.v[3]) }};
}
// Per-lane bitwise operations.
static inline v4si VM_OR(v4si a, v4si b) {
    return v4si{{ (a.v[0] | b.v[0]), (a.v[1] | b.v[1]),
                  (a.v[2] | b.v[2]), (a.v[3] | b.v[3]) }};
}
static inline v4si VM_AND(v4si a, v4si b) {
    return v4si{{ (a.v[0] & b.v[0]), (a.v[1] & b.v[1]),
                  (a.v[2] & b.v[2]), (a.v[3] & b.v[3]) }};
}
// (~a) & b — note the operand order matches _mm_andnot_si128.
static inline v4si VM_ANDNOT(v4si a, v4si b) {
    return v4si{{ ((~a.v[0]) & b.v[0]), ((~a.v[1]) & b.v[1]),
                  ((~a.v[2]) & b.v[2]), ((~a.v[3]) & b.v[3]) }};
}
static inline v4si VM_XOR(v4si a, v4si b) {
    return v4si{{ (a.v[0] ^ b.v[0]), (a.v[1] ^ b.v[1]),
                  (a.v[2] ^ b.v[2]), (a.v[3] ^ b.v[3]) }};
}
  743. static SYS_FORCE_INLINE int
  744. VM_EXTRACT(const v4si v, int index) {
  745. return v.v[index];
  746. }
  747. static SYS_FORCE_INLINE float
  748. VM_EXTRACT(const v4sf v, int index) {
  749. return v.v[index];
  750. }
  751. static SYS_FORCE_INLINE v4si
  752. VM_INSERT(v4si v, int32 value, int index) {
  753. v.v[index] = value;
  754. return v;
  755. }
  756. static SYS_FORCE_INLINE v4sf
  757. VM_INSERT(v4sf v, float value, int index) {
  758. v.v[index] = value;
  759. return v;
  760. }
// Per-lane float comparisons: each lane of the result is an all-ones mask
// when the predicate holds, zero otherwise (same convention as the SSE
// _mm_cmp*_ps intrinsics).
static inline v4si VM_CMPEQ(v4sf a, v4sf b) {
    return v4si{{ conditionMask(a.v[0] == b.v[0]), conditionMask(a.v[1] == b.v[1]),
                  conditionMask(a.v[2] == b.v[2]), conditionMask(a.v[3] == b.v[3]) }};
}
static inline v4si VM_CMPNE(v4sf a, v4sf b) {
    return v4si{{ conditionMask(a.v[0] != b.v[0]), conditionMask(a.v[1] != b.v[1]),
                  conditionMask(a.v[2] != b.v[2]), conditionMask(a.v[3] != b.v[3]) }};
}
static inline v4si VM_CMPGT(v4sf a, v4sf b) {
    return v4si{{ conditionMask(a.v[0] > b.v[0]), conditionMask(a.v[1] > b.v[1]),
                  conditionMask(a.v[2] > b.v[2]), conditionMask(a.v[3] > b.v[3]) }};
}
static inline v4si VM_CMPLT(v4sf a, v4sf b) {
    return v4si{{ conditionMask(a.v[0] < b.v[0]), conditionMask(a.v[1] < b.v[1]),
                  conditionMask(a.v[2] < b.v[2]), conditionMask(a.v[3] < b.v[3]) }};
}
static inline v4si VM_CMPGE(v4sf a, v4sf b) {
    return v4si{{ conditionMask(a.v[0] >= b.v[0]), conditionMask(a.v[1] >= b.v[1]),
                  conditionMask(a.v[2] >= b.v[2]), conditionMask(a.v[3] >= b.v[3]) }};
}
static inline v4si VM_CMPLE(v4sf a, v4sf b) {
    return v4si{{ conditionMask(a.v[0] <= b.v[0]), conditionMask(a.v[1] <= b.v[1]),
                  conditionMask(a.v[2] <= b.v[2]), conditionMask(a.v[3] <= b.v[3]) }};
}
// Per-lane float arithmetic (scalar counterparts of the SSE macros above).
static inline v4sf VM_ADD(v4sf a, v4sf b) {
    return v4sf{{ (a.v[0] + b.v[0]), (a.v[1] + b.v[1]),
                  (a.v[2] + b.v[2]), (a.v[3] + b.v[3]) }};
}
static inline v4sf VM_SUB(v4sf a, v4sf b) {
    return v4sf{{ (a.v[0] - b.v[0]), (a.v[1] - b.v[1]),
                  (a.v[2] - b.v[2]), (a.v[3] - b.v[3]) }};
}
static inline v4sf VM_NEG(v4sf a) {
    return v4sf{{ (-a.v[0]), (-a.v[1]), (-a.v[2]), (-a.v[3]) }};
}
static inline v4sf VM_MUL(v4sf a, v4sf b) {
    return v4sf{{ (a.v[0] * b.v[0]), (a.v[1] * b.v[1]),
                  (a.v[2] * b.v[2]), (a.v[3] * b.v[3]) }};
}
static inline v4sf VM_DIV(v4sf a, v4sf b) {
    return v4sf{{ (a.v[0] / b.v[0]), (a.v[1] / b.v[1]),
                  (a.v[2] / b.v[2]), (a.v[3] / b.v[3]) }};
}
// a*b + c per lane (separate multiply and add, not a fused FMA).
static inline v4sf VM_MADD(v4sf a, v4sf b, v4sf c) {
    return v4sf{{ (a.v[0] * b.v[0]) + c.v[0], (a.v[1] * b.v[1]) + c.v[1],
                  (a.v[2] * b.v[2]) + c.v[2], (a.v[3] * b.v[3]) + c.v[3] }};
}
static inline v4sf VM_ABS(v4sf a) {
    return v4sf{{ (a.v[0] < 0) ? -a.v[0] : a.v[0],
                  (a.v[1] < 0) ? -a.v[1] : a.v[1],
                  (a.v[2] < 0) ? -a.v[2] : a.v[2],
                  (a.v[3] < 0) ? -a.v[3] : a.v[3] }};
}
// Lane-wise max/min. The strict comparison means `a` is kept on equal or
// unordered (NaN) lanes.
static inline v4sf VM_MAX(v4sf a, v4sf b) {
    return v4sf{{ (a.v[0] < b.v[0]) ? b.v[0] : a.v[0],
                  (a.v[1] < b.v[1]) ? b.v[1] : a.v[1],
                  (a.v[2] < b.v[2]) ? b.v[2] : a.v[2],
                  (a.v[3] < b.v[3]) ? b.v[3] : a.v[3] }};
}
static inline v4sf VM_MIN(v4sf a, v4sf b) {
    return v4sf{{ (a.v[0] > b.v[0]) ? b.v[0] : a.v[0],
                  (a.v[1] > b.v[1]) ? b.v[1] : a.v[1],
                  (a.v[2] > b.v[2]) ? b.v[2] : a.v[2],
                  (a.v[3] > b.v[3]) ? b.v[3] : a.v[3] }};
}
// Per-lane reciprocal; exact (1/x), unlike the approximate SSE _mm_rcp_ps.
static inline v4sf VM_INVERT(v4sf a) {
    return v4sf{{ (1.0f/a.v[0]), (1.0f/a.v[1]),
                  (1.0f/a.v[2]), (1.0f/a.v[3]) }};
}
static inline v4sf VM_SQRT(v4sf a) {
    return v4sf{{ std::sqrt(a.v[0]), std::sqrt(a.v[1]),
                  std::sqrt(a.v[2]), std::sqrt(a.v[3]) }};
}
// Truncating float-to-int conversion per lane (counterpart of
// _mm_cvttps_epi32).
static inline v4si VM_INT(v4sf a) {
    return v4si{{ int32(a.v[0]), int32(a.v[1]), int32(a.v[2]), int32(a.v[3]) }};
}
// Int-to-float conversion per lane (counterpart of _mm_cvtepi32_ps).
static inline v4sf VM_IFLOAT(v4si a) {
    return v4sf{{ float(a.v[0]), float(a.v[1]), float(a.v[2]), float(a.v[3]) }};
}
// No-op on the scalar path; paired with VM_E_FLOOR around VM_FLOOR.
// NOTE(review): the P_/E_ pair presumably sets/restores FP state in the
// SIMD builds — confirm against the SSE implementation.
static SYS_FORCE_INLINE void VM_P_FLOOR() {}
  914. static SYS_FORCE_INLINE int32 singleIntFloor(float f) {
  915. // Casting to int32 usually truncates toward zero, instead of rounding down,
  916. // so subtract one if the result is above f.
  917. int32 i = int32(f);
  918. i -= (float(i) > f);
  919. return i;
  920. }
  921. static inline v4si VM_FLOOR(v4sf a) {
  922. return v4si{{
  923. singleIntFloor(a.v[0]),
  924. singleIntFloor(a.v[1]),
  925. singleIntFloor(a.v[2]),
  926. singleIntFloor(a.v[3])
  927. }};
  928. }
// Matching epilogue no-op for VM_P_FLOOR on the scalar path.
static SYS_FORCE_INLINE void VM_E_FLOOR() {}
  930. static SYS_FORCE_INLINE bool vm_allbits(v4si a) {
  931. return (
  932. (a.v[0] == -1) &&
  933. (a.v[1] == -1) &&
  934. (a.v[2] == -1) &&
  935. (a.v[3] == -1)
  936. );
  937. }
  938. int SYS_FORCE_INLINE _mm_movemask_ps(const v4si& v) {
  939. return (
  940. int(v.v[0] < 0) |
  941. (int(v.v[1] < 0)<<1) |
  942. (int(v.v[2] < 0)<<2) |
  943. (int(v.v[3] < 0)<<3)
  944. );
  945. }
  946. int SYS_FORCE_INLINE _mm_movemask_ps(const v4sf& v) {
  947. // Use std::signbit just in case it needs to distinguish between +0 and -0
  948. // or between positive and negative NaN values (e.g. these could really
  949. // be integers instead of floats).
  950. return (
  951. int(std::signbit(v.v[0])) |
  952. (int(std::signbit(v.v[1]))<<1) |
  953. (int(std::signbit(v.v[2]))<<2) |
  954. (int(std::signbit(v.v[3]))<<3)
  955. );
  956. }
  957. }}
  958. #endif
  959. #endif
  960. /*
  961. * Copyright (c) 2018 Side Effects Software Inc.
  962. *
  963. * Permission is hereby granted, free of charge, to any person obtaining a copy
  964. * of this software and associated documentation files (the "Software"), to deal
  965. * in the Software without restriction, including without limitation the rights
  966. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  967. * copies of the Software, and to permit persons to whom the Software is
  968. * furnished to do so, subject to the following conditions:
  969. *
  970. * The above copyright notice and this permission notice shall be included in all
  971. * copies or substantial portions of the Software.
  972. *
  973. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  974. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  975. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  976. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  977. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  978. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  979. * SOFTWARE.
  980. *
  981. * COMMENTS:
  982. * SIMD wrapper classes for 4 floats or 4 ints
  983. */
  984. #pragma once
  985. #ifndef __HDK_VM_SIMD__
  986. #define __HDK_VM_SIMD__
  987. #include <cstdint>
  988. //#define FORCE_NON_SIMD
  989. namespace igl {
  990. /// @private
  991. namespace FastWindingNumber {
  992. class v4uf;
/// Wrapper around a 4-lane 32-bit integer SIMD vector (v4si).
/// Comparison operators return lane masks (all bits set / all clear),
/// and the "logical" operators act bitwise on those masks.
class v4uu {
public:
    // Uninitialized; relies on v4si's default construction.
    SYS_FORCE_INLINE v4uu() {}
    SYS_FORCE_INLINE v4uu(const v4si &v) : vector(v) {}
    SYS_FORCE_INLINE v4uu(const v4uu &v) : vector(v.vector) {}
    // Broadcast a single scalar to all 4 lanes.
    explicit SYS_FORCE_INLINE v4uu(int32 v) { vector = VM_SPLATS(v); }
    // Load 4 lanes from memory.
    explicit SYS_FORCE_INLINE v4uu(const int32 v[4])
    { vector = VM_LOAD(v); }
    // Set each lane individually.
    SYS_FORCE_INLINE v4uu(int32 a, int32 b, int32 c, int32 d)
    { vector = VM_SPLATS(a, b, c, d); }
    // Assignment
    SYS_FORCE_INLINE v4uu operator=(int32 v)
    { vector = v4uu(v).vector; return *this; }
    SYS_FORCE_INLINE v4uu operator=(v4si v)
    { vector = v; return *this; }
    SYS_FORCE_INLINE v4uu operator=(const v4uu &v)
    { vector = v.vector; return *this; }
    // Per-lane conditional assignment: lanes where mask c is set take
    // val; lanes where c is clear keep their current value.
    SYS_FORCE_INLINE void condAssign(const v4uu &val, const v4uu &c)
    { *this = (c & val) | ((!c) & *this); }
    // Comparison (each returns a per-lane mask)
    SYS_FORCE_INLINE v4uu operator == (const v4uu &v) const
    { return v4uu(VM_ICMPEQ(vector, v.vector)); }
    // !=, >= and <= are derived by complementing the opposite test.
    SYS_FORCE_INLINE v4uu operator != (const v4uu &v) const
    { return ~(*this == v); }
    SYS_FORCE_INLINE v4uu operator > (const v4uu &v) const
    { return v4uu(VM_ICMPGT(vector, v.vector)); }
    SYS_FORCE_INLINE v4uu operator < (const v4uu &v) const
    { return v4uu(VM_ICMPLT(vector, v.vector)); }
    SYS_FORCE_INLINE v4uu operator >= (const v4uu &v) const
    { return ~(*this < v); }
    SYS_FORCE_INLINE v4uu operator <= (const v4uu &v) const
    { return ~(*this > v); }
    // Scalar comparisons broadcast the scalar first.
    SYS_FORCE_INLINE v4uu operator == (int32 v) const { return *this == v4uu(v); }
    SYS_FORCE_INLINE v4uu operator != (int32 v) const { return *this != v4uu(v); }
    SYS_FORCE_INLINE v4uu operator > (int32 v) const { return *this > v4uu(v); }
    SYS_FORCE_INLINE v4uu operator < (int32 v) const { return *this < v4uu(v); }
    SYS_FORCE_INLINE v4uu operator >= (int32 v) const { return *this >= v4uu(v); }
    SYS_FORCE_INLINE v4uu operator <= (int32 v) const { return *this <= v4uu(v); }
    // Basic math (per-lane add/subtract)
    SYS_FORCE_INLINE v4uu operator+(const v4uu &r) const
    { return v4uu(VM_IADD(vector, r.vector)); }
    SYS_FORCE_INLINE v4uu operator-(const v4uu &r) const
    { return v4uu(VM_ISUB(vector, r.vector)); }
    SYS_FORCE_INLINE v4uu operator+=(const v4uu &r) { return (*this = *this + r); }
    SYS_FORCE_INLINE v4uu operator-=(const v4uu &r) { return (*this = *this - r); }
    SYS_FORCE_INLINE v4uu operator+(int32 r) const { return *this + v4uu(r); }
    SYS_FORCE_INLINE v4uu operator-(int32 r) const { return *this - v4uu(r); }
    SYS_FORCE_INLINE v4uu operator+=(int32 r) { return (*this = *this + r); }
    SYS_FORCE_INLINE v4uu operator-=(int32 r) { return (*this = *this - r); }
    // logical/bitwise — || and | are both bitwise OR, && and & both
    // bitwise AND; the doubled forms read better on lane masks.
    SYS_FORCE_INLINE v4uu operator||(const v4uu &r) const
    { return v4uu(VM_OR(vector, r.vector)); }
    SYS_FORCE_INLINE v4uu operator&&(const v4uu &r) const
    { return v4uu(VM_AND(vector, r.vector)); }
    SYS_FORCE_INLINE v4uu operator^(const v4uu &r) const
    { return v4uu(VM_XOR(vector, r.vector)); }
    // Per-lane logical NOT: yields the all-bits mask for zero lanes.
    SYS_FORCE_INLINE v4uu operator!() const
    { return *this == v4uu(0); }
    SYS_FORCE_INLINE v4uu operator|(const v4uu &r) const { return *this || r; }
    SYS_FORCE_INLINE v4uu operator&(const v4uu &r) const { return *this && r; }
    // Bitwise complement via XOR with all-ones.
    SYS_FORCE_INLINE v4uu operator~() const
    { return *this ^ v4uu(0xFFFFFFFF); }
    // component access
    SYS_FORCE_INLINE int32 operator[](int idx) const { return VM_EXTRACT(vector, idx); }
    SYS_FORCE_INLINE void setComp(int idx, int32 v) { vector = VM_INSERT(vector, v, idx); }
    // Defined after v4uf, since it must construct one.
    v4uf toFloat() const;
public:
    v4si vector;
};
/// Wrapper around a 4-lane single-precision float SIMD vector (v4sf).
/// Comparison operators return v4uu lane masks; the bitwise operators
/// act on the raw bit patterns of the float lanes.
class v4uf {
public:
    // Uninitialized; relies on v4sf's default construction.
    SYS_FORCE_INLINE v4uf() {}
    SYS_FORCE_INLINE v4uf(const v4sf &v) : vector(v) {}
    SYS_FORCE_INLINE v4uf(const v4uf &v) : vector(v.vector) {}
    // Broadcast a single scalar to all 4 lanes.
    explicit SYS_FORCE_INLINE v4uf(float v) { vector = VM_SPLATS(v); }
    // Load 4 lanes from memory.
    explicit SYS_FORCE_INLINE v4uf(const float v[4])
    { vector = VM_LOAD(v); }
    // Set each lane individually.
    SYS_FORCE_INLINE v4uf(float a, float b, float c, float d)
    { vector = VM_SPLATS(a, b, c, d); }
    // Assignment
    SYS_FORCE_INLINE v4uf operator=(float v)
    { vector = v4uf(v).vector; return *this; }
    SYS_FORCE_INLINE v4uf operator=(v4sf v)
    { vector = v; return *this; }
    SYS_FORCE_INLINE v4uf operator=(const v4uf &v)
    { vector = v.vector; return *this; }
    // Per-lane conditional assignment: lanes where mask c is set take
    // val; lanes where c is clear keep their current value.
    SYS_FORCE_INLINE void condAssign(const v4uf &val, const v4uu &c)
    { *this = (val & c) | (*this & ~c); }
    // Comparison (each returns a per-lane v4uu mask)
    SYS_FORCE_INLINE v4uu operator == (const v4uf &v) const
    { return v4uu(VM_CMPEQ(vector, v.vector)); }
    SYS_FORCE_INLINE v4uu operator != (const v4uf &v) const
    { return v4uu(VM_CMPNE(vector, v.vector)); }
    SYS_FORCE_INLINE v4uu operator > (const v4uf &v) const
    { return v4uu(VM_CMPGT(vector, v.vector)); }
    SYS_FORCE_INLINE v4uu operator < (const v4uf &v) const
    { return v4uu(VM_CMPLT(vector, v.vector)); }
    SYS_FORCE_INLINE v4uu operator >= (const v4uf &v) const
    { return v4uu(VM_CMPGE(vector, v.vector)); }
    SYS_FORCE_INLINE v4uu operator <= (const v4uf &v) const
    { return v4uu(VM_CMPLE(vector, v.vector)); }
    // Scalar comparisons broadcast the scalar first.
    SYS_FORCE_INLINE v4uu operator == (float v) const { return *this == v4uf(v); }
    SYS_FORCE_INLINE v4uu operator != (float v) const { return *this != v4uf(v); }
    SYS_FORCE_INLINE v4uu operator > (float v) const { return *this > v4uf(v); }
    SYS_FORCE_INLINE v4uu operator < (float v) const { return *this < v4uf(v); }
    SYS_FORCE_INLINE v4uu operator >= (float v) const { return *this >= v4uf(v); }
    SYS_FORCE_INLINE v4uu operator <= (float v) const { return *this <= v4uf(v); }
    // Basic math (per-lane)
    SYS_FORCE_INLINE v4uf operator+(const v4uf &r) const
    { return v4uf(VM_ADD(vector, r.vector)); }
    SYS_FORCE_INLINE v4uf operator-(const v4uf &r) const
    { return v4uf(VM_SUB(vector, r.vector)); }
    SYS_FORCE_INLINE v4uf operator-() const
    { return v4uf(VM_NEG(vector)); }
    SYS_FORCE_INLINE v4uf operator*(const v4uf &r) const
    { return v4uf(VM_MUL(vector, r.vector)); }
    SYS_FORCE_INLINE v4uf operator/(const v4uf &r) const
    { return v4uf(VM_DIV(vector, r.vector)); }
    SYS_FORCE_INLINE v4uf operator+=(const v4uf &r) { return (*this = *this + r); }
    SYS_FORCE_INLINE v4uf operator-=(const v4uf &r) { return (*this = *this - r); }
    SYS_FORCE_INLINE v4uf operator*=(const v4uf &r) { return (*this = *this * r); }
    SYS_FORCE_INLINE v4uf operator/=(const v4uf &r) { return (*this = *this / r); }
    SYS_FORCE_INLINE v4uf operator+(float r) const { return *this + v4uf(r); }
    SYS_FORCE_INLINE v4uf operator-(float r) const { return *this - v4uf(r); }
    SYS_FORCE_INLINE v4uf operator*(float r) const { return *this * v4uf(r); }
    SYS_FORCE_INLINE v4uf operator/(float r) const { return *this / v4uf(r); }
    SYS_FORCE_INLINE v4uf operator+=(float r) { return (*this = *this + r); }
    SYS_FORCE_INLINE v4uf operator-=(float r) { return (*this = *this - r); }
    SYS_FORCE_INLINE v4uf operator*=(float r) { return (*this = *this * r); }
    SYS_FORCE_INLINE v4uf operator/=(float r) { return (*this = *this / r); }
    // logical/bitwise — these reinterpret the float lanes as integer
    // bits (V4SI/V4SF casts), so they are typically applied to lane
    // masks produced by the comparisons above.
    SYS_FORCE_INLINE v4uf operator||(const v4uu &r) const
    { return v4uf(V4SF(VM_OR(V4SI(vector), r.vector))); }
    SYS_FORCE_INLINE v4uf operator&&(const v4uu &r) const
    { return v4uf(V4SF(VM_AND(V4SI(vector), r.vector))); }
    SYS_FORCE_INLINE v4uf operator^(const v4uu &r) const
    { return v4uf(V4SF(VM_XOR(V4SI(vector), r.vector))); }
    // Per-lane logical NOT: the comparison mask (all bits / no bits)
    // reinterpreted as float lanes.
    SYS_FORCE_INLINE v4uf operator!() const
    { return v4uf(V4SF((*this == v4uf(0.0F)).vector)); }
    SYS_FORCE_INLINE v4uf operator||(const v4uf &r) const
    { return v4uf(V4SF(VM_OR(V4SI(vector), V4SI(r.vector)))); }
    SYS_FORCE_INLINE v4uf operator&&(const v4uf &r) const
    { return v4uf(V4SF(VM_AND(V4SI(vector), V4SI(r.vector)))); }
    SYS_FORCE_INLINE v4uf operator^(const v4uf &r) const
    { return v4uf(V4SF(VM_XOR(V4SI(vector), V4SI(r.vector)))); }
    SYS_FORCE_INLINE v4uf operator|(const v4uu &r) const { return *this || r; }
    SYS_FORCE_INLINE v4uf operator&(const v4uu &r) const { return *this && r; }
    // Bitwise complement of the lane bit patterns.
    SYS_FORCE_INLINE v4uf operator~() const
    { return *this ^ v4uu(0xFFFFFFFF); }
    SYS_FORCE_INLINE v4uf operator|(const v4uf &r) const { return *this || r; }
    SYS_FORCE_INLINE v4uf operator&(const v4uf &r) const { return *this && r; }
    // component access
    SYS_FORCE_INLINE float operator[](int idx) const { return VM_EXTRACT(vector, idx); }
    SYS_FORCE_INLINE void setComp(int idx, float v) { vector = VM_INSERT(vector, v, idx); }
    // more math
    SYS_FORCE_INLINE v4uf abs() const { return v4uf(VM_ABS(vector)); }
    SYS_FORCE_INLINE v4uf clamp(const v4uf &low, const v4uf &high) const
    { return v4uf(
        VM_MIN(VM_MAX(vector, low.vector), high.vector)); }
    SYS_FORCE_INLINE v4uf clamp(float low, float high) const
    { return v4uf(VM_MIN(VM_MAX(vector,
        v4uf(low).vector), v4uf(high).vector)); }
    // Per-lane reciprocal 1/x.
    SYS_FORCE_INLINE v4uf recip() const { return v4uf(VM_INVERT(vector)); }
    /// This is a lie, it is a signed int.
    SYS_FORCE_INLINE v4uu toUnsignedInt() const { return VM_INT(vector); }
    SYS_FORCE_INLINE v4uu toSignedInt() const { return VM_INT(vector); }
    // Per-lane floor(), returned as integer lanes.
    v4uu floor() const
    {
        VM_P_FLOOR();
        v4uu result = VM_FLOOR(vector);
        VM_E_FLOOR();
        return result;
    }
    /// Returns the integer part of this float, this becomes the
    /// 0..1 fractional component.
    v4uu splitFloat()
    {
        v4uu base = toSignedInt();
        *this -= base.toFloat();
        return base;
    }
#ifdef __SSE__
    // Lane permutation; only available on the SSE path.
    template <int A, int B, int C, int D>
    SYS_FORCE_INLINE v4uf swizzle() const
    {
        return VM_SHUFFLE<A,B,C,D>(vector);
    }
#endif
    // Per-lane mask: set where the lane is neither infinite nor NaN.
    SYS_FORCE_INLINE v4uu isFinite() const
    {
        // If the exponent is the maximum value, it's either infinite or NaN.
        const v4si mask = VM_SPLATS(0x7F800000);
        return ~v4uu(VM_ICMPEQ(VM_AND(V4SI(vector), mask), mask));
    }
public:
    v4sf vector;
};
// Convert each int32 lane to float. Defined out-of-line because it must
// construct a v4uf, which is declared after v4uu.
SYS_FORCE_INLINE v4uf
v4uu::toFloat() const
{
    return v4uf(VM_IFLOAT(vector));
}
  1195. //
  1196. // Custom vector operations
  1197. //
  1198. static SYS_FORCE_INLINE v4uf
  1199. sqrt(const v4uf &a)
  1200. {
  1201. return v4uf(VM_SQRT(a.vector));
  1202. }
  1203. static SYS_FORCE_INLINE v4uf
  1204. fabs(const v4uf &a)
  1205. {
  1206. return a.abs();
  1207. }
  1208. // Use this operation to mask disabled values to 0
  1209. // rval = !a ? b : 0;
  1210. static SYS_FORCE_INLINE v4uf
  1211. andn(const v4uu &a, const v4uf &b)
  1212. {
  1213. return v4uf(V4SF(VM_ANDNOT(a.vector, V4SI(b.vector))));
  1214. }
  1215. static SYS_FORCE_INLINE v4uu
  1216. andn(const v4uu &a, const v4uu &b)
  1217. {
  1218. return v4uu(VM_ANDNOT(a.vector, b.vector));
  1219. }
  1220. // rval = a ? b : c;
  1221. static SYS_FORCE_INLINE v4uf
  1222. ternary(const v4uu &a, const v4uf &b, const v4uf &c)
  1223. {
  1224. return (b & a) | andn(a, c);
  1225. }
  1226. static SYS_FORCE_INLINE v4uu
  1227. ternary(const v4uu &a, const v4uu &b, const v4uu &c)
  1228. {
  1229. return (b & a) | andn(a, c);
  1230. }
  1231. // rval = !(a && b)
  1232. static SYS_FORCE_INLINE v4uu
  1233. nand(const v4uu &a, const v4uu &b)
  1234. {
  1235. return !v4uu(VM_AND(a.vector, b.vector));
  1236. }
  1237. static SYS_FORCE_INLINE v4uf
  1238. vmin(const v4uf &a, const v4uf &b)
  1239. {
  1240. return v4uf(VM_MIN(a.vector, b.vector));
  1241. }
  1242. static SYS_FORCE_INLINE v4uf
  1243. vmax(const v4uf &a, const v4uf &b)
  1244. {
  1245. return v4uf(VM_MAX(a.vector, b.vector));
  1246. }
  1247. static SYS_FORCE_INLINE v4uf
  1248. clamp(const v4uf &a, const v4uf &b, const v4uf &c)
  1249. {
  1250. return vmax(vmin(a, c), b);
  1251. }
  1252. static SYS_FORCE_INLINE v4uf
  1253. clamp(const v4uf &a, float b, float c)
  1254. {
  1255. return vmax(vmin(a, v4uf(c)), v4uf(b));
  1256. }
  1257. static SYS_FORCE_INLINE bool
  1258. allbits(const v4uu &a)
  1259. {
  1260. return vm_allbits(a.vector);
  1261. }
  1262. static SYS_FORCE_INLINE bool
  1263. anybits(const v4uu &a)
  1264. {
  1265. return !allbits(~a);
  1266. }
  1267. static SYS_FORCE_INLINE v4uf
  1268. madd(const v4uf &v, const v4uf &f, const v4uf &a)
  1269. {
  1270. return v4uf(VM_MADD(v.vector, f.vector, a.vector));
  1271. }
  1272. static SYS_FORCE_INLINE v4uf
  1273. madd(const v4uf &v, float f, float a)
  1274. {
  1275. return v4uf(VM_MADD(v.vector, v4uf(f).vector, v4uf(a).vector));
  1276. }
  1277. static SYS_FORCE_INLINE v4uf
  1278. madd(const v4uf &v, float f, const v4uf &a)
  1279. {
  1280. return v4uf(VM_MADD(v.vector, v4uf(f).vector, a.vector));
  1281. }
  1282. static SYS_FORCE_INLINE v4uf
  1283. msub(const v4uf &v, const v4uf &f, const v4uf &s)
  1284. {
  1285. return madd(v, f, -s);
  1286. }
  1287. static SYS_FORCE_INLINE v4uf
  1288. msub(const v4uf &v, float f, float s)
  1289. {
  1290. return madd(v, f, -s);
  1291. }
  1292. static SYS_FORCE_INLINE v4uf
  1293. lerp(const v4uf &a, const v4uf &b, const v4uf &w)
  1294. {
  1295. v4uf w1 = v4uf(1.0F) - w;
  1296. return madd(a, w1, b*w);
  1297. }
  1298. static SYS_FORCE_INLINE v4uf
  1299. luminance(const v4uf &r, const v4uf &g, const v4uf &b,
  1300. float rw, float gw, float bw)
  1301. {
  1302. return v4uf(madd(r, v4uf(rw), madd(g, v4uf(gw), b * bw)));
  1303. }
  1304. static SYS_FORCE_INLINE float
  1305. dot3(const v4uf &a, const v4uf &b)
  1306. {
  1307. v4uf res = a*b;
  1308. return res[0] + res[1] + res[2];
  1309. }
  1310. static SYS_FORCE_INLINE float
  1311. dot4(const v4uf &a, const v4uf &b)
  1312. {
  1313. v4uf res = a*b;
  1314. return res[0] + res[1] + res[2] + res[3];
  1315. }
  1316. static SYS_FORCE_INLINE float
  1317. length(const v4uf &a)
  1318. {
  1319. return SYSsqrt(dot3(a, a));
  1320. }
  1321. static SYS_FORCE_INLINE v4uf
  1322. normalize(const v4uf &a)
  1323. {
  1324. return a / length(a);
  1325. }
  1326. static SYS_FORCE_INLINE v4uf
  1327. cross(const v4uf &a, const v4uf &b)
  1328. {
  1329. return v4uf(a[1]*b[2] - a[2]*b[1],
  1330. a[2]*b[0] - a[0]*b[2],
  1331. a[0]*b[1] - a[1]*b[0], 0);
  1332. }
  1333. // Currently there is no specific support for signed integers
  1334. typedef v4uu v4ui;
  1335. // Assuming that ptr is an array of elements of type STYPE, this operation
  1336. // will return the index of the first element that is aligned to (1<<ASIZE)
  1337. // bytes.
  1338. #define VM_ALIGN(ptr, ASIZE, STYPE) \
  1339. ((((1<<ASIZE)-(intptr_t)ptr)&((1<<ASIZE)-1))/sizeof(STYPE))
  1340. }}
  1341. #endif
  1342. /*
  1343. * Copyright (c) 2018 Side Effects Software Inc.
  1344. *
  1345. * Permission is hereby granted, free of charge, to any person obtaining a copy
  1346. * of this software and associated documentation files (the "Software"), to deal
  1347. * in the Software without restriction, including without limitation the rights
  1348. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  1349. * copies of the Software, and to permit persons to whom the Software is
  1350. * furnished to do so, subject to the following conditions:
  1351. *
  1352. * The above copyright notice and this permission notice shall be included in all
  1353. * copies or substantial portions of the Software.
  1354. *
  1355. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  1356. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  1357. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  1358. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  1359. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  1360. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  1361. * SOFTWARE.
  1362. *
  1363. * COMMENTS:
  1364. * This is the array class implementation used by almost everything here.
  1365. */
  1366. #pragma once
  1367. #ifndef __UT_ARRAY_H_INCLUDED__
  1368. #define __UT_ARRAY_H_INCLUDED__
  1369. #include <algorithm>
  1370. #include <functional>
  1371. #include <type_traits>
  1372. #include <string.h>
  1373. namespace igl {
  1374. /// @private
  1375. namespace FastWindingNumber {
  1376. /// This routine describes how to change the size of an array.
  1377. /// It must increase the current_size by at least one!
  1378. ///
  1379. /// Current expected sequence of small sizes:
  1380. /// 4, 8, 16, 32, 48, 64, 80, 96, 112,
  1381. /// 128, 256, 384, 512, 640, 768, 896, 1024,
  1382. /// (increases by approx factor of 1.125 each time after this)
  1383. template <typename T>
  1384. static inline T
  1385. UTbumpAlloc(T current_size)
  1386. {
  1387. // NOTE: These must be powers of two. See below.
  1388. constexpr T SMALL_ALLOC(16);
  1389. constexpr T BIG_ALLOC(128);
  1390. // For small values, we increment by fixed amounts. For
  1391. // large values, we increment by one eighth of the current size.
  1392. // This prevents n^2 behaviour with allocation one element at a time.
  1393. // A factor of 1/8 will waste 1/16 the memory on average, and will
  1394. // double the size of the array in approximately 6 reallocations.
  1395. if (current_size < T(8))
  1396. {
  1397. return (current_size < T(4)) ? T(4) : T(8);
  1398. }
  1399. if (current_size < T(BIG_ALLOC))
  1400. {
  1401. // Snap up to next multiple of SMALL_ALLOC (must be power of 2)
  1402. return (current_size + T(SMALL_ALLOC)) & ~T(SMALL_ALLOC-1);
  1403. }
  1404. if (current_size < T(BIG_ALLOC * 8))
  1405. {
  1406. // Snap up to next multiple of BIG_ALLOC (must be power of 2)
  1407. return (current_size + T(BIG_ALLOC)) & ~T(BIG_ALLOC-1);
  1408. }
  1409. T bump = current_size >> 3; // Divided by 8.
  1410. current_size += bump;
  1411. return current_size;
  1412. }
  1413. template <typename T>
  1414. class UT_Array
  1415. {
  1416. public:
  1417. typedef T value_type;
  1418. typedef int (*Comparator)(const T *, const T *);
  1419. /// Copy constructor. It duplicates the data.
  1420. /// It's marked explicit so that it's not accidentally passed by value.
  1421. /// You can always pass by reference and then copy it, if needed.
  1422. /// If you have a line like:
  1423. /// UT_Array<int> a = otherarray;
  1424. /// and it really does need to copy instead of referencing,
  1425. /// you can rewrite it as:
  1426. /// UT_Array<int> a(otherarray);
  1427. inline explicit UT_Array(const UT_Array<T> &a);
  1428. /// Move constructor. Steals the working data from the original.
  1429. inline UT_Array(UT_Array<T> &&a) noexcept;
/// Construct based on given capacity and size
UT_Array(exint capacity, exint size)
{
    // Only allocate when a non-zero capacity was requested.
    myData = capacity ? allocateCapacity(capacity) : NULL;
    // The occupied size can never exceed the allocated capacity.
    if (capacity < size)
        size = capacity;
    mySize = size;
    myCapacity = capacity;
    // Initialize the first mySize elements (zero for PODs, default
    // construction for class types — matches zero()'s documented rule).
    trivialConstructRange(myData, mySize);
}
  1440. /// Construct based on given capacity with a size of 0
  1441. explicit UT_Array(exint capacity = 0) : myCapacity(capacity), mySize(0)
  1442. {
  1443. myData = capacity ? allocateCapacity(capacity) : NULL;
  1444. }
  1445. /// Construct with the contents of an initializer list
  1446. inline explicit UT_Array(std::initializer_list<T> init);
  1447. inline ~UT_Array();
  1448. inline void swap(UT_Array<T> &other);
  1449. /// Append an element to the current elements and return its index in the
  1450. /// array, or insert the element at a specified position; if necessary,
  1451. /// insert() grows the array to accommodate the element. The insert
  1452. /// methods use the assignment operator '=' to place the element into the
  1453. /// right spot; be aware that '=' works differently on objects and pointers.
  1454. /// The test for duplicates uses the logical equal operator '=='; as with
  1455. /// '=', the behaviour of the equality operator on pointers versus objects
  1456. /// is not the same.
  1457. /// Use the subscript operators instead of insert() if you are appending
  1458. /// to the array, or if you don't mind overwriting the element already
  1459. /// inserted at the given index.
  1460. exint append(void) { return insert(mySize); }
  1461. exint append(const T &t) { return appendImpl(t); }
  1462. exint append(T &&t) { return appendImpl(std::move(t)); }
  1463. inline void append(const T *pt, exint count);
  1464. inline void appendMultiple(const T &t, exint count);
  1465. inline exint insert(exint index);
  1466. exint insert(const T &t, exint i)
  1467. { return insertImpl(t, i); }
  1468. exint insert(T &&t, exint i)
  1469. { return insertImpl(std::move(t), i); }
  1470. /// Adds a new element to the array (resizing if necessary) and forwards
  1471. /// the given arguments to T's constructor.
  1472. /// NOTE: Unlike append(), the arguments cannot reference any existing
  1473. /// elements in the array. Checking for and handling such cases would
  1474. /// remove most of the performance gain versus append(T(...)). Debug builds
  1475. /// will assert that the arguments are valid.
  1476. template <typename... S>
  1477. inline exint emplace_back(S&&... s);
  1478. /// Takes another T array and concatenate it onto my end
  1479. inline exint concat(const UT_Array<T> &a);
  1480. /// Insert an element "count" times at the given index. Return the index.
  1481. inline exint multipleInsert(exint index, exint count);
  1482. /// An alias for unique element insertion at a certain index. Also used by
  1483. /// the other insertion methods.
  1484. exint insertAt(const T &t, exint index)
  1485. { return insertImpl(t, index); }
  1486. /// Return true if given index is valid.
  1487. bool isValidIndex(exint index) const
  1488. { return (index >= 0 && index < mySize); }
  1489. /// Remove one element from the array given its
  1490. /// position in the list, and fill the gap by shifting the elements down
  1491. /// by one position. Return the index of the element removed or -1 if
  1492. /// the index was out of bounds.
  1493. exint removeIndex(exint index)
  1494. {
  1495. return isValidIndex(index) ? removeAt(index) : -1;
  1496. }
  1497. void removeLast()
  1498. {
  1499. if (mySize) removeAt(mySize-1);
  1500. }
  1501. /// Remove the range [begin_i,end_i) of elements from the array.
  1502. inline void removeRange(exint begin_i, exint end_i);
  1503. /// Remove the range [begin_i, end_i) of elements from this array and place
  1504. /// them in the dest array, shrinking/growing the dest array as necessary.
  1505. inline void extractRange(exint begin_i, exint end_i,
  1506. UT_Array<T>& dest);
  1507. /// Removes all matching elements from the list, shuffling down and changing
  1508. /// the size appropriately.
  1509. /// Returns the number of elements left.
  1510. template <typename IsEqual>
  1511. inline exint removeIf(IsEqual is_equal);
  1512. /// Remove all matching elements. Also sets the capacity of the array.
  1513. template <typename IsEqual>
  1514. void collapseIf(IsEqual is_equal)
  1515. {
  1516. removeIf(is_equal);
  1517. setCapacity(size());
  1518. }
  1519. /// Move howMany objects starting at index srcIndex to destIndex;
  1520. /// This method will remove the elements at [srcIdx, srcIdx+howMany) and
  1521. /// then insert them at destIdx. This method can be used in place of
  1522. /// the old shift() operation.
  1523. inline void move(exint srcIdx, exint destIdx, exint howMany);
  1524. /// Cyclically shifts the entire array by howMany
  1525. inline void cycle(exint howMany);
  1526. /// Quickly set the array to a single value.
  1527. inline void constant(const T &v);
  1528. /// Zeros the array if a POD type, else trivial constructs if a class type.
  1529. inline void zero();
  1530. /// The fastest search possible, which does pointer arithmetic to find the
  1531. /// index of the element. WARNING: index() does no out-of-bounds checking.
  1532. exint index(const T &t) const { return &t - myData; }
  1533. exint safeIndex(const T &t) const
  1534. {
  1535. return (&t >= myData && &t < (myData + mySize))
  1536. ? &t - myData : -1;
  1537. }
  1538. /// Set the capacity of the array, i.e. grow it or shrink it. The
  1539. /// function copies the data after reallocating space for the array.
  1540. inline void setCapacity(exint newcapacity);
  1541. void setCapacityIfNeeded(exint mincapacity)
  1542. {
  1543. if (capacity() < mincapacity)
  1544. setCapacity(mincapacity);
  1545. }
  1546. /// If the capacity is smaller than mincapacity, expand the array
  1547. /// to at least mincapacity and to at least a constant factor of the
  1548. /// array's previous capacity, to avoid having a linear number of
  1549. /// reallocations in a linear number of calls to bumpCapacity.
  1550. void bumpCapacity(exint mincapacity)
  1551. {
  1552. if (capacity() >= mincapacity)
  1553. return;
  1554. // The following 4 lines are just
  1555. // SYSmax(mincapacity, UTbumpAlloc(capacity())), avoiding SYSmax
  1556. exint bumped = UTbumpAlloc(capacity());
  1557. exint newcapacity = mincapacity;
  1558. if (bumped > mincapacity)
  1559. newcapacity = bumped;
  1560. setCapacity(newcapacity);
  1561. }
  1562. /// First bumpCapacity to ensure that there's space for newsize,
  1563. /// expanding either not at all or by at least a constant factor
  1564. /// of the array's previous capacity,
  1565. /// then set the size to newsize.
  1566. void bumpSize(exint newsize)
  1567. {
  1568. bumpCapacity(newsize);
  1569. setSize(newsize);
  1570. }
  1571. /// NOTE: bumpEntries() will be deprecated in favour of bumpSize() in a
  1572. /// future version.
  1573. void bumpEntries(exint newsize)
  1574. {
  1575. bumpSize(newsize);
  1576. }
  1577. /// Query the capacity, i.e. the allocated length of the array.
  1578. /// NOTE: capacity() >= size().
  1579. exint capacity() const { return myCapacity; }
  1580. /// Query the size, i.e. the number of occupied elements in the array.
  1581. /// NOTE: capacity() >= size().
  1582. exint size() const { return mySize; }
  1583. /// Alias of size(). size() is preferred.
  1584. exint entries() const { return mySize; }
  1585. /// Returns true iff there are no occupied elements in the array.
  1586. bool isEmpty() const { return mySize==0; }
  1587. /// Set the size, the number of occupied elements in the array.
  1588. /// NOTE: This will not do bumpCapacity, so if you call this
  1589. /// n times to increase the size, it may take
  1590. /// n^2 time.
void setSize(exint newsize)
{
    // Negative requests are clamped to an empty array.
    if (newsize < 0)
        newsize = 0;
    if (newsize == mySize)
        return;
    // Grow the allocation exactly to newsize if needed — no amortized
    // over-allocation here (see the NOTE above about repeated calls).
    setCapacityIfNeeded(newsize);
    if (mySize > newsize)
        // Shrinking: destroy the elements past the new end.
        trivialDestructRange(myData + newsize, mySize - newsize);
    else // newsize > mySize
        // Growing: initialize the new tail elements.
        trivialConstructRange(myData + mySize, newsize - mySize);
    mySize = newsize;
}
  1604. /// Alias of setSize(). setSize() is preferred.
  1605. void entries(exint newsize)
  1606. {
  1607. setSize(newsize);
  1608. }
  1609. /// Set the size, but unlike setSize(newsize), this function
  1610. /// will not initialize new POD elements to zero. Non-POD data types
  1611. /// will still have their constructors called.
  1612. /// This function is faster than setSize(ne) if you intend to fill in
  1613. /// data for all elements.
void setSizeNoInit(exint newsize)
{
    // Negative requests are clamped to an empty array.
    if (newsize < 0)
        newsize = 0;
    if (newsize == mySize)
        return;
    setCapacityIfNeeded(newsize);
    if (mySize > newsize)
        // Shrinking: destroy the elements past the new end.
        trivialDestructRange(myData + newsize, mySize - newsize);
    else if (!isPOD()) // newsize > mySize
        // Growing: class types still need their constructors run;
        // POD elements are deliberately left uninitialized.
        trivialConstructRange(myData + mySize, newsize - mySize);
    mySize = newsize;
}
  1627. /// Decreases, but never expands, to the given maxsize.
  1628. void truncate(exint maxsize)
  1629. {
  1630. if (maxsize >= 0 && size() > maxsize)
  1631. setSize(maxsize);
  1632. }
/// Resets list to an empty list.
/// NOTE: The capacity (and the allocation itself) is retained.
void clear() {
    // Don't call setSize(0) since that would require a valid default
    // constructor.
    trivialDestructRange(myData, mySize);
    mySize = 0;
}
  1640. /// Assign array a to this array by copying each of a's elements with
  1641. /// memcpy for POD types, and with copy construction for class types.
  1642. inline UT_Array<T> & operator=(const UT_Array<T> &a);
  1643. /// Replace the contents with those from the initializer_list ilist
  1644. inline UT_Array<T> & operator=(std::initializer_list<T> ilist);
  1645. /// Move the contents of array a to this array.
  1646. inline UT_Array<T> & operator=(UT_Array<T> &&a);
  1647. /// Compare two array and return true if they are equal and false otherwise.
  1648. /// Two elements are checked against each other using operator '==' or
  1649. /// compare() respectively.
  1650. /// NOTE: The capacities of the arrays are not checked when
  1651. /// determining whether they are equal.
  1652. inline bool operator==(const UT_Array<T> &a) const;
  1653. inline bool operator!=(const UT_Array<T> &a) const;
  1654. /// Subscript operator
  1655. /// NOTE: This does NOT do any bounds checking unless paranoid
  1656. /// asserts are enabled.
  1657. T & operator()(exint i)
  1658. {
  1659. UT_ASSERT_P(i >= 0 && i < mySize);
  1660. return myData[i];
  1661. }
  1662. /// Const subscript operator
  1663. /// NOTE: This does NOT do any bounds checking unless paranoid
  1664. /// asserts are enabled.
  1665. const T & operator()(exint i) const
  1666. {
  1667. UT_ASSERT_P(i >= 0 && i < mySize);
  1668. return myData[i];
  1669. }
  1670. /// Subscript operator
  1671. /// NOTE: This does NOT do any bounds checking unless paranoid
  1672. /// asserts are enabled.
  1673. T & operator[](exint i)
  1674. {
  1675. UT_ASSERT_P(i >= 0 && i < mySize);
  1676. return myData[i];
  1677. }
  1678. /// Const subscript operator
  1679. /// NOTE: This does NOT do any bounds checking unless paranoid
  1680. /// asserts are enabled.
  1681. const T & operator[](exint i) const
  1682. {
  1683. UT_ASSERT_P(i >= 0 && i < mySize);
  1684. return myData[i];
  1685. }
  1686. /// forcedRef(exint) will grow the array if necessary, initializing any
  1687. /// new elements to zero for POD types and default constructing for
  1688. /// class types.
  1689. T & forcedRef(exint i)
  1690. {
  1691. UT_ASSERT_P(i >= 0);
  1692. if (i >= mySize)
  1693. bumpSize(i+1);
  1694. return myData[i];
  1695. }
  1696. /// forcedGet(exint) does NOT grow the array, and will return default
  1697. /// objects for out of bound array indices.
  1698. T forcedGet(exint i) const
  1699. {
  1700. return (i >= 0 && i < mySize) ? myData[i] : T();
  1701. }
/// Returns a reference to the final occupied element.
/// NOTE: Asserts (paranoid builds only) that the array is non-empty.
T & last()
{
    UT_ASSERT_P(mySize);
    return myData[mySize-1];
}
/// Const overload of last().
const T & last() const
{
    UT_ASSERT_P(mySize);
    return myData[mySize-1];
}

/// Raw-buffer accessors. All return the same underlying pointer;
/// the const overloads differ only in constness of the elements.
T * getArray() const { return myData; }
const T * getRawArray() const { return myData; }
T * array() { return myData; }
const T * array() const { return myData; }
T * data() { return myData; }
const T * data() const { return myData; }

/// This method allows you to swap in a new raw T array, which must be
/// the same size as myCapacity. Use caution with this method.
/// Returns the previous buffer; the caller takes ownership of it.
T * aliasArray(T *newdata)
{ T *data = myData; myData = newdata; return data; }
  1722. template <typename IT, bool FORWARD>
  1723. class base_iterator :
  1724. public std::iterator<std::random_access_iterator_tag, T, exint>
  1725. {
  1726. public:
  1727. typedef IT& reference;
  1728. typedef IT* pointer;
  1729. // Note: When we drop gcc 4.4 support and allow range-based for
  1730. // loops, we should also drop atEnd(), which means we can drop
  1731. // myEnd here.
  1732. base_iterator() : myCurrent(NULL), myEnd(NULL) {}
  1733. // Allow iterator to const_iterator conversion
  1734. template<typename EIT>
  1735. base_iterator(const base_iterator<EIT, FORWARD> &src)
  1736. : myCurrent(src.myCurrent), myEnd(src.myEnd) {}
  1737. pointer operator->() const
  1738. { return FORWARD ? myCurrent : myCurrent - 1; }
  1739. reference operator*() const
  1740. { return FORWARD ? *myCurrent : myCurrent[-1]; }
  1741. reference item() const
  1742. { return FORWARD ? *myCurrent : myCurrent[-1]; }
  1743. reference operator[](exint n) const
  1744. { return FORWARD ? myCurrent[n] : myCurrent[-n - 1]; }
  1745. /// Pre-increment operator
  1746. base_iterator &operator++()
  1747. {
  1748. if (FORWARD) ++myCurrent; else --myCurrent;
  1749. return *this;
  1750. }
  1751. /// Post-increment operator
  1752. base_iterator operator++(int)
  1753. {
  1754. base_iterator tmp = *this;
  1755. if (FORWARD) ++myCurrent; else --myCurrent;
  1756. return tmp;
  1757. }
  1758. /// Pre-decrement operator
  1759. base_iterator &operator--()
  1760. {
  1761. if (FORWARD) --myCurrent; else ++myCurrent;
  1762. return *this;
  1763. }
  1764. /// Post-decrement operator
  1765. base_iterator operator--(int)
  1766. {
  1767. base_iterator tmp = *this;
  1768. if (FORWARD) --myCurrent; else ++myCurrent;
  1769. return tmp;
  1770. }
  1771. base_iterator &operator+=(exint n)
  1772. {
  1773. if (FORWARD)
  1774. myCurrent += n;
  1775. else
  1776. myCurrent -= n;
  1777. return *this;
  1778. }
  1779. base_iterator operator+(exint n) const
  1780. {
  1781. if (FORWARD)
  1782. return base_iterator(myCurrent + n, myEnd);
  1783. else
  1784. return base_iterator(myCurrent - n, myEnd);
  1785. }
  1786. base_iterator &operator-=(exint n)
  1787. { return (*this) += (-n); }
  1788. base_iterator operator-(exint n) const
  1789. { return (*this) + (-n); }
  1790. bool atEnd() const { return myCurrent == myEnd; }
  1791. void advance() { this->operator++(); }
  1792. // Comparators
  1793. template<typename ITR, bool FR>
  1794. bool operator==(const base_iterator<ITR, FR> &r) const
  1795. { return myCurrent == r.myCurrent; }
  1796. template<typename ITR, bool FR>
  1797. bool operator!=(const base_iterator<ITR, FR> &r) const
  1798. { return myCurrent != r.myCurrent; }
  1799. template<typename ITR>
  1800. bool operator<(const base_iterator<ITR, FORWARD> &r) const
  1801. {
  1802. if (FORWARD)
  1803. return myCurrent < r.myCurrent;
  1804. else
  1805. return r.myCurrent < myCurrent;
  1806. }
  1807. template<typename ITR>
  1808. bool operator>(const base_iterator<ITR, FORWARD> &r) const
  1809. {
  1810. if (FORWARD)
  1811. return myCurrent > r.myCurrent;
  1812. else
  1813. return r.myCurrent > myCurrent;
  1814. }
  1815. template<typename ITR>
  1816. bool operator<=(const base_iterator<ITR, FORWARD> &r) const
  1817. {
  1818. if (FORWARD)
  1819. return myCurrent <= r.myCurrent;
  1820. else
  1821. return r.myCurrent <= myCurrent;
  1822. }
  1823. template<typename ITR>
  1824. bool operator>=(const base_iterator<ITR, FORWARD> &r) const
  1825. {
  1826. if (FORWARD)
  1827. return myCurrent >= r.myCurrent;
  1828. else
  1829. return r.myCurrent >= myCurrent;
  1830. }
  1831. // Difference operator for std::distance
  1832. template<typename ITR>
  1833. exint operator-(const base_iterator<ITR, FORWARD> &r) const
  1834. {
  1835. if (FORWARD)
  1836. return exint(myCurrent - r.myCurrent);
  1837. else
  1838. return exint(r.myCurrent - myCurrent);
  1839. }
  1840. protected:
  1841. friend class UT_Array<T>;
  1842. base_iterator(IT *c, IT *e) : myCurrent(c), myEnd(e) {}
  1843. private:
  1844. IT *myCurrent;
  1845. IT *myEnd;
  1846. };
typedef base_iterator<T, true> iterator;
typedef base_iterator<const T, true> const_iterator;
typedef base_iterator<T, false> reverse_iterator;
typedef base_iterator<const T, false> const_reverse_iterator;
typedef const_iterator traverser; // For backward compatibility

/// Begin iterating over the array. The contents of the array may be
/// modified during the traversal.
iterator begin()
{
    return iterator(myData, myData + mySize);
}
/// End iterator.
iterator end()
{
    return iterator(myData + mySize,
                    myData + mySize);
}
/// Begin iterating over the array. The array may not be modified during
/// the traversal.
const_iterator begin() const
{
    return const_iterator(myData, myData + mySize);
}
/// End const iterator. Consider using it.atEnd() instead.
const_iterator end() const
{
    return const_iterator(myData + mySize,
                          myData + mySize);
}
/// Begin iterating over the array in reverse.
/// NOTE: reverse iterators store a pointer one past the element they
/// reference, so rbegin() starts at myData + mySize and its "end"
/// sentinel is myData.
reverse_iterator rbegin()
{
    return reverse_iterator(myData + mySize,
                            myData);
}
/// End reverse iterator.
reverse_iterator rend()
{
    return reverse_iterator(myData, myData);
}
/// Begin iterating over the array in reverse.
const_reverse_iterator rbegin() const
{
    return const_reverse_iterator(myData + mySize,
                                  myData);
}
/// End reverse iterator. Consider using it.atEnd() instead.
const_reverse_iterator rend() const
{
    return const_reverse_iterator(myData, myData);
}
/// Remove item specified by the reverse_iterator.
void removeItem(const reverse_iterator &it)
{
    // it.item() dereferences the element before the stored pointer,
    // so this pointer difference is the element's index directly.
    removeAt(&it.item() - myData);
}

/// Very dangerous methods to share arrays.
/// The array is not aware of the sharing, so ensure you clear
/// out the array prior a destructor or setCapacity operation.
void unsafeShareData(UT_Array<T> &src)
{
    myData = src.myData;
    myCapacity = src.myCapacity;
    mySize = src.mySize;
}
/// Share the given raw buffer, treating it as both full and exactly
/// at capacity (size == capacity == srcsize).
void unsafeShareData(T *src, exint srcsize)
{
    myData = src;
    myCapacity = srcsize;
    mySize = srcsize;
}
/// Share the given raw buffer with an explicit size and capacity.
void unsafeShareData(T *src, exint size, exint capacity)
{
    myData = src;
    mySize = size;
    myCapacity = capacity;
}
  1924. void unsafeClearData()
  1925. {
  1926. myData = NULL;
  1927. myCapacity = 0;
  1928. mySize = 0;
  1929. }
/// Returns true if the data used by the array was allocated on the heap.
/// A non-heap (inline/stack) buffer is presumably placed immediately
/// after this object by a small-array subclass, i.e. at address
/// (this + 1) -- TODO confirm against the small-array specialization.
inline bool isHeapBuffer() const
{
    return (myData != (T *)(((char*)this) + sizeof(*this)));
}
/// Same test applied to an arbitrary candidate buffer pointer
/// (e.g. a freshly malloc'd block that might alias the inline slot).
inline bool isHeapBuffer(T* data) const
{
    return (data != (T *)(((char*)this) + sizeof(*this)));
}
protected:
// Check whether T may have a constructor, destructor, or copy
// constructor. This test is conservative in that some POD types will
// not be recognized as POD by this function. To mark your type as POD,
// use the SYS_DECLARE_IS_POD() macro in SYS_TypeDecorate.h.
static constexpr SYS_FORCE_INLINE bool isPOD()
{
    // NOTE(review): std::is_pod is deprecated in C++20; if this needs
    // updating, std::is_trivial && std::is_standard_layout is the
    // equivalent -- confirm against the project's language level.
    return std::is_pod<T>::value;
}

/// Implements both append(const T &) and append(T &&) via perfect
/// forwarding. Unlike the variadic emplace_back(), its argument may be a
/// reference to another element in the array.
template <typename S>
inline exint appendImpl(S &&s);

/// Similar to appendImpl() but for insertion.
template <typename S>
inline exint insertImpl(S &&s, exint index);

// Construct the given type in place at dst, forwarding any arguments.
template <typename... S>
static void construct(T &dst, S&&... s)
{
    new (&dst) T(std::forward<S>(s)...);
}

// Copy construct the given type: plain assignment for POD,
// placement copy-construction otherwise.
static void copyConstruct(T &dst, const T &src)
{
    if (isPOD())
        dst = src;
    else
        new (&dst) T(src);
}

// Copy-construct n elements from src into (uninitialized) dst:
// one memcpy for POD, per-element copy construction otherwise.
static void copyConstructRange(T *dst, const T *src, exint n)
{
    if (isPOD())
    {
        if (n > 0)
        {
            ::memcpy((void *)dst, (const void *)src,
                     n * sizeof(T));
        }
    }
    else
    {
        for (exint i = 0; i < n; i++)
            new (&dst[i]) T(src[i]);
    }
}

/// Element Constructor: zero-fill for POD, default-construct otherwise.
static void trivialConstruct(T &dst)
{
    if (!isPOD())
        new (&dst) T();
    else
        memset((void *)&dst, 0, sizeof(T));
}

/// Construct n elements starting at dst (zero-fill POD,
/// default-construct non-POD).
static void trivialConstructRange(T *dst, exint n)
{
    if (!isPOD())
    {
        for (exint i = 0; i < n; i++)
            new (&dst[i]) T();
    }
    else if (n == 1)
    {
        // Special case for n == 1. If the size parameter
        // passed to memset is known at compile time, this
        // function call will be inlined. This results in
        // much faster performance than a real memset
        // function call which is required in the case
        // below, where n is not known until runtime.
        // This makes calls to append() much faster.
        memset((void *)dst, 0, sizeof(T));
    }
    else
        memset((void *)dst, 0, sizeof(T) * n);
}

/// Element Destructor: a no-op for POD types.
static void trivialDestruct(T &dst)
{
    if (!isPOD())
        dst.~T();
}

/// Destroy n elements starting at dst (no-op for POD types).
static void trivialDestructRange(T *dst, exint n)
{
    if (!isPOD())
    {
        for (exint i = 0; i < n; i++)
            dst[i].~T();
    }
}
private:
/// Pointer to the array of elements of type T
T *myData;
/// The number of elements for which we have allocated memory
exint myCapacity;
/// The actual number of valid elements in the array
exint mySize;

// The guts of the remove() methods.
inline exint removeAt(exint index);
// Heap-allocates a buffer for num_items elements, re-allocating if the
// result happens to alias the inline-buffer address (see isHeapBuffer).
inline T * allocateCapacity(exint num_items);
  2039. };
  2040. }}
  2041. #endif // __UT_ARRAY_H_INCLUDED__
  2042. /*
  2043. * Copyright (c) 2018 Side Effects Software Inc.
  2044. *
  2045. * Permission is hereby granted, free of charge, to any person obtaining a copy
  2046. * of this software and associated documentation files (the "Software"), to deal
  2047. * in the Software without restriction, including without limitation the rights
  2048. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  2049. * copies of the Software, and to permit persons to whom the Software is
  2050. * furnished to do so, subject to the following conditions:
  2051. *
  2052. * The above copyright notice and this permission notice shall be included in all
  2053. * copies or substantial portions of the Software.
  2054. *
  2055. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  2056. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  2057. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  2058. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  2059. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  2060. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  2061. * SOFTWARE.
  2062. *
  2063. * COMMENTS:
  2064. * This is meant to be included by UT_Array.h and includes
  2065. * the template implementations needed by external code.
  2066. */
  2067. #pragma once
  2068. #ifndef __UT_ARRAYIMPL_H_INCLUDED__
  2069. #define __UT_ARRAYIMPL_H_INCLUDED__
  2070. #include <algorithm>
  2071. #include <utility>
  2072. #include <stdlib.h>
  2073. #include <string.h>
  2074. namespace igl {
  2075. /// @private
  2076. namespace FastWindingNumber {
  2077. // Implemented in UT_Array.C
  2078. extern void ut_ArrayImplFree(void *p);
/// Copy constructor: allocates exactly a.size() capacity and
/// copy-constructs each element (memcpy for POD types).
template <typename T>
inline UT_Array<T>::UT_Array(const UT_Array<T> &a)
    : myCapacity(a.size()), mySize(a.size())
{
    if (myCapacity)
    {
        myData = allocateCapacity(myCapacity);
        copyConstructRange(myData, a.array(), mySize);
    }
    else
    {
        myData = nullptr;
    }
}

/// Construct from an initializer list, copying each element.
template <typename T>
inline UT_Array<T>::UT_Array(std::initializer_list<T> init)
    : myCapacity(init.size()), mySize(init.size())
{
    if (myCapacity)
    {
        myData = allocateCapacity(myCapacity);
        copyConstructRange(myData, init.begin(), mySize);
    }
    else
    {
        myData = nullptr;
    }
}

/// Move constructor. Steals a's heap buffer when it has one; a small
/// inline buffer cannot be stolen, so that case falls through to the
/// element-wise move in operator=(UT_Array &&).
template <typename T>
inline UT_Array<T>::UT_Array(UT_Array<T> &&a) noexcept
{
    if (!a.isHeapBuffer())
    {
        myData = nullptr;
        myCapacity = 0;
        mySize = 0;
        operator=(std::move(a));
        return;
    }
    myCapacity = a.myCapacity;
    mySize = a.mySize;
    myData = a.myData;
    // Leave the source empty so its destructor is a no-op.
    a.myCapacity = a.mySize = 0;
    a.myData = nullptr;
}

template <typename T>
inline UT_Array<T>::~UT_Array()
{
    // NOTE: We call setCapacity to ensure that we call trivialDestructRange,
    // then call free on myData.
    setCapacity(0);
}
/// Heap-allocate an uninitialized buffer for `capacity` elements.
template <typename T>
inline T *
UT_Array<T>::allocateCapacity(exint capacity)
{
    T *data = (T *)malloc(capacity * sizeof(T));
    // Avoid degenerate case if we happen to be aliased the wrong way:
    // if malloc returned the address of the inline small-buffer slot,
    // isHeapBuffer() would later misclassify this buffer, so allocate
    // a second block and release the aliased one.
    if (!isHeapBuffer(data))
    {
        T *prev = data;
        data = (T *)malloc(capacity * sizeof(T));
        ut_ArrayImplFree(prev);
    }
    return data;
}
  2145. template <typename T>
  2146. inline void
  2147. UT_Array<T>::swap( UT_Array<T> &other )
  2148. {
  2149. std::swap( myData, other.myData );
  2150. std::swap( myCapacity, other.myCapacity );
  2151. std::swap( mySize, other.mySize );
  2152. }
/// Insert a default-initialized element at `index`, growing the array
/// if index is at or past the current end. Returns the index.
template <typename T>
inline exint
UT_Array<T>::insert(exint index)
{
    if (index >= mySize)
    {
        // Appending past the end: fill the gap (and the new slot)
        // with default-initialized elements.
        bumpCapacity(index + 1);
        trivialConstructRange(myData + mySize, index - mySize + 1);
        mySize = index+1;
        return index;
    }
    bumpCapacity(mySize + 1);
    UT_ASSERT_P(index >= 0);
    // Bitwise relocation of the tail; elements are moved without
    // invoking constructors/destructors (intentional for this class).
    ::memmove((void *)&myData[index+1], (void *)&myData[index],
              ((mySize-index)*sizeof(T)));
    trivialConstruct(myData[index]);
    mySize++;
    return index;
}
/// Append one element, forwarding `s` (lvalue => copy, rvalue => move).
/// Safe even when `s` refers to an element of this array: safeIndex()
/// detects the alias so the element can be re-fetched after a
/// reallocation invalidates the original reference.
template <typename T>
template <typename S>
inline exint
UT_Array<T>::appendImpl(S &&s)
{
    if (mySize == myCapacity)
    {
        // Capture the self-alias (if any) before reallocating.
        exint idx = safeIndex(s);
        // NOTE: UTbumpAlloc always returns a strictly larger value.
        setCapacity(UTbumpAlloc(myCapacity));
        if (idx >= 0)
            construct(myData[mySize], std::forward<S>(myData[idx]));
        else
            construct(myData[mySize], std::forward<S>(s));
    }
    else
    {
        construct(myData[mySize], std::forward<S>(s));
    }
    return mySize++;
}

/// Construct a new element in place at the end from the given
/// constructor arguments; returns its index.
/// NOTE: Unlike appendImpl(), the arguments must not reference
/// elements of this array, since reallocation would invalidate them.
template <typename T>
template <typename... S>
inline exint
UT_Array<T>::emplace_back(S&&... s)
{
    if (mySize == myCapacity)
        setCapacity(UTbumpAlloc(myCapacity));
    construct(myData[mySize], std::forward<S>(s)...);
    return mySize++;
}
  2203. template <typename T>
  2204. inline void
  2205. UT_Array<T>::append(const T *pt, exint count)
  2206. {
  2207. bumpCapacity(mySize + count);
  2208. copyConstructRange(myData + mySize, pt, count);
  2209. mySize += count;
  2210. }
/// Append `count` copies of `t`. Handles `t` aliasing an element of
/// this array via safeIndex(), since bumpCapacity may reallocate.
template <typename T>
inline void
UT_Array<T>::appendMultiple(const T &t, exint count)
{
    UT_ASSERT_P(count >= 0);
    if (count <= 0)
        return;
    if (mySize + count >= myCapacity)
    {
        // May reallocate: remember t's index if it lives in this array,
        // and re-fetch it through myData after the bump.
        exint tidx = safeIndex(t);
        bumpCapacity(mySize + count);
        for (exint i = 0; i < count; i++)
            copyConstruct(myData[mySize+i], tidx >= 0 ? myData[tidx] : t);
    }
    else
    {
        for (exint i = 0; i < count; i++)
            copyConstruct(myData[mySize+i], t);
    }
    mySize += count;
}

/// Append all of a's elements to this array; returns the new size.
/// (Self-concat is fine: if &a == this, a.myData is re-read after
/// bumpCapacity updates it.)
template <typename T>
inline exint
UT_Array<T>::concat(const UT_Array<T> &a)
{
    bumpCapacity(mySize + a.mySize);
    copyConstructRange(myData + mySize, a.myData, a.mySize);
    mySize += a.mySize;
    return mySize;
}

/// Open a gap of `count` default-initialized elements starting at
/// beg_index; returns beg_index.
template <typename T>
inline exint
UT_Array<T>::multipleInsert(exint beg_index, exint count)
{
    exint end_index = beg_index + count;
    if (beg_index >= mySize)
    {
        // Entirely past the end: just grow and default-fill the gap.
        bumpCapacity(end_index);
        trivialConstructRange(myData + mySize, end_index - mySize);
        mySize = end_index;
        return beg_index;
    }
    bumpCapacity(mySize+count);
    // Bitwise relocation of the tail (no ctors/dtors run).
    ::memmove((void *)&myData[end_index], (void *)&myData[beg_index],
              ((mySize-beg_index)*sizeof(T)));
    mySize += count;
    trivialConstructRange(myData + beg_index, count);
    return beg_index;
}
/// Insert one forwarded element at `index`, growing as needed.
/// Like appendImpl(), `s` may alias an element of this array:
/// safeIndex() records the alias so it survives reallocation and,
/// in the shift case, the memmove of the tail.
template <typename T>
template <typename S>
inline exint
UT_Array<T>::insertImpl(S &&s, exint index)
{
    if (index == mySize)
    {
        // This case avoids an extraneous call to trivialConstructRange()
        // which the compiler may not optimize out.
        (void) appendImpl(std::forward<S>(s));
    }
    else if (index > mySize)
    {
        // Past the end: default-fill the gap, then construct at index.
        exint src_i = safeIndex(s);
        bumpCapacity(index + 1);
        trivialConstructRange(myData + mySize, index - mySize);
        if (src_i >= 0)
            construct(myData[index], std::forward<S>(myData[src_i]));
        else
            construct(myData[index], std::forward<S>(s));
        mySize = index + 1;
    }
    else // (index < mySize)
    {
        exint src_i = safeIndex(s);
        bumpCapacity(mySize + 1);
        // Bitwise shift of the tail to open the slot.
        ::memmove((void *)&myData[index+1], (void *)&myData[index],
                  ((mySize-index)*sizeof(T)));
        // If the aliased source was in the shifted tail, it moved up
        // by one slot.
        if (src_i >= index)
            ++src_i;
        if (src_i >= 0)
            construct(myData[index], std::forward<S>(myData[src_i]));
        else
            construct(myData[index], std::forward<S>(s));
        ++mySize;
    }
    return index;
}
/// Destroy the element at idx and close the gap with a bitwise shift.
/// Returns idx.
template <typename T>
inline exint
UT_Array<T>::removeAt(exint idx)
{
    trivialDestruct(myData[idx]);
    // Unless we removed the last element, slide the tail down
    // (raw memmove: elements are relocated without ctors/dtors).
    if (idx != --mySize)
    {
        ::memmove((void *)&myData[idx], (void *)&myData[idx+1],
                  ((mySize-idx)*sizeof(T)));
    }
    return idx;
}

/// Remove the half-open range [begin_i, end_i).
template <typename T>
inline void
UT_Array<T>::removeRange(exint begin_i, exint end_i)
{
    UT_ASSERT(begin_i <= end_i);
    UT_ASSERT(end_i <= size());
    if (end_i < size())
    {
        // Destroy the removed elements, then slide the tail down.
        trivialDestructRange(myData + begin_i, end_i - begin_i);
        ::memmove((void *)&myData[begin_i], (void *)&myData[end_i],
                  (mySize - end_i)*sizeof(T));
    }
    // setSize destructs the (now stale) trailing elements when
    // end_i == size(), or just shrinks mySize otherwise.
    setSize(mySize - (end_i - begin_i));
}

/// Move the half-open range [begin_i, end_i) into `dest` (bitwise
/// relocation -- elements change owner without ctors/dtors running),
/// then close the gap in this array.
template <typename T>
inline void
UT_Array<T>::extractRange(exint begin_i, exint end_i, UT_Array<T>& dest)
{
    UT_ASSERT_P(begin_i >= 0);
    UT_ASSERT_P(begin_i <= end_i);
    UT_ASSERT_P(end_i <= size());
    UT_ASSERT(this != &dest);

    exint nelements = end_i - begin_i;
    // grow the raw array if necessary.
    dest.setCapacityIfNeeded(nelements);

    ::memmove((void*)dest.myData, (void*)&myData[begin_i],
              nelements * sizeof(T));
    dest.mySize = nelements;

    // we just asserted this was true, but just in case
    if (this != &dest)
    {
        if (end_i < size())
        {
            ::memmove((void*)&myData[begin_i], (void*)&myData[end_i],
                      (mySize - end_i) * sizeof(T));
        }
        setSize(mySize - nelements);
    }
}
/// Move a contiguous group of `howMany` elements from srcIdx to
/// destIdx, rotating the elements in between. All relocation is
/// bitwise (memcpy/memmove via a temp buffer); no ctors/dtors run.
template <typename T>
inline void
UT_Array<T>::move(exint srcIdx, exint destIdx, exint howMany)
{
    // Make sure all the parameters are valid.
    if( srcIdx < 0 )
        srcIdx = 0;
    if( destIdx < 0 )
        destIdx = 0;
    // If we are told to move a set of elements that would extend beyond the
    // end of the current array, trim the group.
    if( srcIdx + howMany > size() )
        howMany = size() - srcIdx;
    // If the destIdx would have us move the source beyond the end of the
    // current array, move the destIdx back.
    if( destIdx + howMany > size() )
        destIdx = size() - howMany;

    if( srcIdx != destIdx && howMany > 0 )
    {
        void **tmp = 0;
        exint savelen;
        // The displaced span between source and destination.
        savelen = SYSabs(srcIdx - destIdx);
        tmp = (void **)::malloc(savelen*sizeof(T));
        if( srcIdx > destIdx && howMany > 0 )
        {
            // We're moving the group backwards. Save all the stuff that
            // we would overwrite, plus everything beyond that to the
            // start of the source group. Then move the source group, then
            // tack the saved data onto the end of the moved group.
            ::memcpy(tmp, (void *)&myData[destIdx], (savelen*sizeof(T)));
            ::memmove((void *)&myData[destIdx], (void *)&myData[srcIdx],
                      (howMany*sizeof(T)));
            ::memcpy((void *)&myData[destIdx+howMany], tmp, (savelen*sizeof(T)));
        }
        if( srcIdx < destIdx && howMany > 0 )
        {
            // We're moving the group forwards. Save from the end of the
            // group being moved to the end of the where the destination
            // group will end up. Then copy the source to the destination.
            // Then move back up to the original source location and drop
            // in our saved data.
            ::memcpy(tmp, (void *)&myData[srcIdx+howMany], (savelen*sizeof(T)));
            ::memmove((void *)&myData[destIdx], (void *)&myData[srcIdx],
                      (howMany*sizeof(T)));
            ::memcpy((void *)&myData[srcIdx], tmp, (savelen*sizeof(T)));
        }
        ::free(tmp);
    }
}
  2398. template <typename T>
  2399. template <typename IsEqual>
  2400. inline exint
  2401. UT_Array<T>::removeIf(IsEqual is_equal)
  2402. {
  2403. // Move dst to the first element to remove.
  2404. exint dst;
  2405. for (dst = 0; dst < mySize; dst++)
  2406. {
  2407. if (is_equal(myData[dst]))
  2408. break;
  2409. }
  2410. // Now start looking at all the elements past the first one to remove.
  2411. for (exint idx = dst+1; idx < mySize; idx++)
  2412. {
  2413. if (!is_equal(myData[idx]))
  2414. {
  2415. UT_ASSERT(idx != dst);
  2416. myData[dst] = myData[idx];
  2417. dst++;
  2418. }
  2419. // On match, ignore.
  2420. }
  2421. // New size
  2422. mySize = dst;
  2423. return mySize;
  2424. }
/// Rotate the array contents by howMany positions (negative rotates
/// the other way). Relocation is bitwise via a temporary buffer; no
/// element constructors/destructors run.
template <typename T>
inline void
UT_Array<T>::cycle(exint howMany)
{
    char *tempPtr;
    exint numShift; // The number of items we shift
    exint remaining; // mySize - numShift

    if (howMany == 0 || mySize < 1) return;

    // Normalize the shift into [0, mySize).
    numShift = howMany % (exint)mySize;
    if (numShift < 0) numShift += mySize;
    remaining = mySize - numShift;

    // Save the tail, slide the head up, drop the saved tail at the front.
    tempPtr = new char[numShift*sizeof(T)];
    ::memmove(tempPtr, (void *)&myData[remaining], (numShift * sizeof(T)));
    ::memmove((void *)&myData[numShift], (void *)&myData[0], (remaining * sizeof(T)));
    ::memmove((void *)&myData[0], tempPtr, (numShift * sizeof(T)));
    delete [] tempPtr;
}
  2442. template <typename T>
  2443. inline void
  2444. UT_Array<T>::constant(const T &value)
  2445. {
  2446. for (exint i = 0; i < mySize; i++)
  2447. {
  2448. myData[i] = value;
  2449. }
  2450. }
/// Zero out the occupied elements. For POD types this is one memset;
/// for non-POD types the default constructor is re-run in place over
/// the existing elements (their destructors are NOT called first).
template <typename T>
inline void
UT_Array<T>::zero()
{
    if (isPOD())
        ::memset((void *)myData, 0, mySize*sizeof(T));
    else
        trivialConstructRange(myData, mySize);
}

/// Set the allocated capacity exactly. Shrinking below the current
/// size destructs the excess elements. setCapacity(0) frees the heap
/// buffer entirely. Element relocation is bitwise (realloc/memcpy).
template <typename T>
inline void
UT_Array<T>::setCapacity(exint capacity)
{
    // Do nothing when new capacity is the same as the current
    if (capacity == myCapacity)
        return;

    // Special case for non-heap (inline small-array) buffers.
    if (!isHeapBuffer())
    {
        if (capacity < mySize)
        {
            // Destroy the extra elements without changing myCapacity
            trivialDestructRange(myData + capacity, mySize - capacity);
            mySize = capacity;
        }
        else if (capacity > myCapacity)
        {
            // Growing past the inline buffer: switch to a heap buffer.
            T *prev = myData;
            myData = (T *)malloc(sizeof(T) * capacity);
            // myData is safe because we're already a stack buffer
            UT_ASSERT_P(isHeapBuffer());
            if (mySize > 0)
                memcpy((void *)myData, (void *)prev, sizeof(T) * mySize);
            myCapacity = capacity;
        }
        else
        {
            // Keep myCapacity unchanged in this case
            UT_ASSERT_P(capacity >= mySize && capacity <= myCapacity);
        }
        return;
    }

    if (capacity == 0)
    {
        if (myData)
        {
            trivialDestructRange(myData, mySize);
            free(myData);
        }
        myData = 0;
        myCapacity = 0;
        mySize = 0;
        return;
    }

    if (capacity < mySize)
    {
        trivialDestructRange(myData + capacity, mySize - capacity);
        mySize = capacity;
    }

    if (myData)
        myData = (T *)realloc(myData, capacity*sizeof(T));
    else
        myData = (T *)malloc(sizeof(T) * capacity);

    // Avoid degenerate case if we happen to be aliased the wrong way:
    // a (re)allocation landing on the inline-buffer address would make
    // isHeapBuffer() lie, so move to a fresh block.
    if (!isHeapBuffer())
    {
        T *prev = myData;
        myData = (T *)malloc(sizeof(T) * capacity);
        if (mySize > 0)
            memcpy((void *)myData, (void *)prev, sizeof(T) * mySize);
        ut_ArrayImplFree(prev);
    }

    myCapacity = capacity;
    UT_ASSERT(myData);
}
/// Copy assignment: destroys current elements, then copy-constructs
/// a's elements into this array's (possibly grown) buffer.
template <typename T>
inline UT_Array<T> &
UT_Array<T>::operator=(const UT_Array<T> &a)
{
    if (this == &a)
        return *this;

    // Grow the raw array if necessary.
    setCapacityIfNeeded(a.size());

    // Make sure destructors and constructors are called on all elements
    // being removed/added.
    trivialDestructRange(myData, mySize);
    copyConstructRange(myData, a.myData, a.size());

    mySize = a.size();

    return *this;
}

/// Assign from an initializer list (same destroy-then-copy pattern).
template <typename T>
inline UT_Array<T> &
UT_Array<T>::operator=(std::initializer_list<T> a)
{
    const exint new_size = a.size();

    // Grow the raw array if necessary.
    setCapacityIfNeeded(new_size);

    // Make sure destructors and constructors are called on all elements
    // being removed/added.
    trivialDestructRange(myData, mySize);

    copyConstructRange(myData, a.begin(), new_size);

    mySize = new_size;

    return *this;
}

/// Move assignment. Steals a's heap buffer when possible; a non-heap
/// (inline) source buffer cannot be stolen, so its elements are moved
/// one at a time instead.
template <typename T>
inline UT_Array<T> &
UT_Array<T>::operator=(UT_Array<T> &&a)
{
    if (!a.isHeapBuffer())
    {
        // Cannot steal from non-heap buffers
        clear();
        const exint n = a.size();
        setCapacityIfNeeded(n);
        if (isPOD())
        {
            if (n > 0)
                memcpy(myData, a.myData, n * sizeof(T));
        }
        else
        {
            for (exint i = 0; i < n; ++i)
                new (&myData[i]) T(std::move(a.myData[i]));
        }
        mySize = a.mySize;
        a.mySize = 0;
        return *this;
    }

    // else, just steal even if we're a small buffer

    // Destroy all the elements we're currently holding.
    if (myData)
    {
        trivialDestructRange(myData, mySize);
        if (isHeapBuffer())
            ::free(myData);
    }

    // Move the contents of the other array to us and empty the other container
    // so that it destructs cleanly.
    myCapacity = a.myCapacity;
    mySize = a.mySize;
    myData = a.myData;
    a.myCapacity = a.mySize = 0;
    a.myData = nullptr;

    return *this;
}
  2596. template <typename T>
  2597. inline bool
  2598. UT_Array<T>::operator==(const UT_Array<T> &a) const
  2599. {
  2600. if (this == &a) return true;
  2601. if (mySize != a.size()) return false;
  2602. for (exint i = 0; i < mySize; i++)
  2603. if (!(myData[i] == a(i))) return false;
  2604. return true;
  2605. }
  2606. template <typename T>
  2607. inline bool
  2608. UT_Array<T>::operator!=(const UT_Array<T> &a) const
  2609. {
  2610. return (!operator==(a));
  2611. }
  2612. }}
  2613. #endif // __UT_ARRAYIMPL_H_INCLUDED__
  2614. /*
  2615. * Copyright (c) 2018 Side Effects Software Inc.
  2616. *
  2617. * Permission is hereby granted, free of charge, to any person obtaining a copy
  2618. * of this software and associated documentation files (the "Software"), to deal
  2619. * in the Software without restriction, including without limitation the rights
  2620. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  2621. * copies of the Software, and to permit persons to whom the Software is
  2622. * furnished to do so, subject to the following conditions:
  2623. *
  2624. * The above copyright notice and this permission notice shall be included in all
  2625. * copies or substantial portions of the Software.
  2626. *
  2627. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  2628. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  2629. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  2630. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  2631. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  2632. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  2633. * SOFTWARE.
  2634. *
  2635. * COMMENTS:
  2636. * Special case for arrays that are usually small,
  2637. * to avoid a heap allocation when the array really is small.
  2638. */
  2639. #pragma once
  2640. #ifndef __UT_SMALLARRAY_H_INCLUDED__
  2641. #define __UT_SMALLARRAY_H_INCLUDED__
  2642. #include <utility>
  2643. #include <stddef.h>
  2644. namespace igl {
  2645. /// @private
  2646. namespace FastWindingNumber {
  2647. /// An array class with the small buffer optimization, making it ideal for
  2648. /// cases when you know it will only contain a few elements at the expense of
  2649. /// increasing the object size by MAX_BYTES (subject to alignment).
template <typename T, size_t MAX_BYTES = 64>
class UT_SmallArray : public UT_Array<T>
{
// As many elements that fit into MAX_BYTES with 1 item minimum
enum { MAX_ELEMS = MAX_BYTES/sizeof(T) < 1 ? 1 : MAX_BYTES/sizeof(T) };
public:
// gcc falsely warns about our use of offsetof() on non-POD types. We can't
// easily suppress this because it has to be done in the caller at
// instantiation time. Instead, punt to a runtime check.
// The assertion verifies that myBuffer sits immediately after the
// UT_Array<T> base class in memory; the base class relies on this layout
// to decide whether its data pointer refers to the inline buffer or to a
// heap allocation it must free.
#if defined(__clang__) || defined(_MSC_VER)
#define UT_SMALL_ARRAY_SIZE_ASSERT() \
using ThisT = UT_SmallArray<T,MAX_BYTES>; \
static_assert(offsetof(ThisT, myBuffer) == sizeof(UT_Array<T>), \
"In order for UT_Array's checks for whether it needs to free the buffer to work, " \
"the buffer must be exactly following the base class memory.")
#else
#define UT_SMALL_ARRAY_SIZE_ASSERT() \
UT_ASSERT_P(!UT_Array<T>::isHeapBuffer());
#endif
/// Default construction
/// Points the base class at the inline buffer before anything can allocate.
UT_SmallArray()
    : UT_Array<T>(/*capacity*/0)
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_ASSERT();
}
/// Copy constructor
/// The inline buffer is installed first, then the base-class assignment
/// copies the elements (growing to the heap only if they don't fit).
/// @{
explicit UT_SmallArray(const UT_Array<T> &copy)
    : UT_Array<T>(/*capacity*/0)
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_ASSERT();
UT_Array<T>::operator=(copy);
}
explicit UT_SmallArray(const UT_SmallArray<T,MAX_BYTES> &copy)
    : UT_Array<T>(/*capacity*/0)
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_ASSERT();
UT_Array<T>::operator=(copy);
}
/// @}
/// Move constructor
/// NOTE(review): unlike the copy constructors, these default-construct the
/// base class instead of passing /*capacity*/0 -- confirm the two forms are
/// equivalent for UT_Array<T>.
/// @{
UT_SmallArray(UT_Array<T> &&movable) noexcept
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_ASSERT();
UT_Array<T>::operator=(std::move(movable));
}
UT_SmallArray(UT_SmallArray<T,MAX_BYTES> &&movable) noexcept
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_ASSERT();
UT_Array<T>::operator=(std::move(movable));
}
/// @}
/// Initializer list constructor
/// NOTE(review): also default-constructs the base class; see the move
/// constructors above.
explicit UT_SmallArray(std::initializer_list<T> init)
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_ASSERT();
UT_Array<T>::operator=(init);
}
#undef UT_SMALL_ARRAY_SIZE_ASSERT
/// Assignment operator
/// All assignment/move operators simply defer to the base class, which
/// already handles small-buffer destinations and sources correctly.
/// @{
UT_SmallArray<T,MAX_BYTES> &
operator=(const UT_SmallArray<T,MAX_BYTES> &copy)
{
UT_Array<T>::operator=(copy);
return *this;
}
UT_SmallArray<T,MAX_BYTES> &
operator=(const UT_Array<T> &copy)
{
UT_Array<T>::operator=(copy);
return *this;
}
/// @}
/// Move operator
/// @{
UT_SmallArray<T,MAX_BYTES> &
operator=(UT_SmallArray<T,MAX_BYTES> &&movable)
{
UT_Array<T>::operator=(std::move(movable));
return *this;
}
UT_SmallArray<T,MAX_BYTES> &
operator=(UT_Array<T> &&movable)
{
UT_Array<T>::operator=(std::move(movable));
return *this;
}
/// @}
UT_SmallArray<T,MAX_BYTES> &
operator=(std::initializer_list<T> src)
{
UT_Array<T>::operator=(src);
return *this;
}
private:
// Inline storage for up to MAX_ELEMS elements; must remain the first
// (and only) member so it directly follows the base class (see the
// UT_SMALL_ARRAY_SIZE_ASSERT above).
alignas(T) char myBuffer[MAX_ELEMS*sizeof(T)];
};
  2755. }}
  2756. #endif // __UT_SMALLARRAY_H_INCLUDED__
  2757. /*
  2758. * Copyright (c) 2018 Side Effects Software Inc.
  2759. *
  2760. * Permission is hereby granted, free of charge, to any person obtaining a copy
  2761. * of this software and associated documentation files (the "Software"), to deal
  2762. * in the Software without restriction, including without limitation the rights
  2763. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  2764. * copies of the Software, and to permit persons to whom the Software is
  2765. * furnished to do so, subject to the following conditions:
  2766. *
  2767. * The above copyright notice and this permission notice shall be included in all
  2768. * copies or substantial portions of the Software.
  2769. *
  2770. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  2771. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  2772. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  2773. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  2774. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  2775. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  2776. * SOFTWARE.
  2777. *
  2778. * COMMENTS:
  2779. * A vector class templated on its size and data type.
  2780. */
  2781. #pragma once
  2782. #ifndef __UT_FixedVector__
  2783. #define __UT_FixedVector__
  2784. namespace igl {
  2785. /// @private
  2786. namespace FastWindingNumber {
template<typename T,exint SIZE,bool INSTANTIATED=false>
class UT_FixedVector
{
public:
typedef UT_FixedVector<T,SIZE,INSTANTIATED> ThisType;
typedef T value_type;
typedef T theType;
static const exint theSize = SIZE;
/// The components, stored inline (no heap allocation).
T vec[SIZE];
/// Default constructor: components are intentionally left uninitialized,
/// like a raw C array.
SYS_FORCE_INLINE UT_FixedVector() = default;
/// Initializes every component to the same value
SYS_FORCE_INLINE explicit UT_FixedVector(T that) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that;
}
SYS_FORCE_INLINE UT_FixedVector(const ThisType &that) = default;
SYS_FORCE_INLINE UT_FixedVector(ThisType &&that) = default;
/// Converts vector of S into vector of T,
/// or just copies if same type.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE UT_FixedVector(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that[i];
}
/// Component-wise construction from a C array of (possibly different) type S.
template<typename S>
SYS_FORCE_INLINE UT_FixedVector(const S that[SIZE]) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that[i];
}
/// Component access, bounds-checked in debug builds only.
SYS_FORCE_INLINE const T &operator[](exint i) const noexcept
{
UT_ASSERT_P(i >= 0 && i < SIZE);
return vec[i];
}
SYS_FORCE_INLINE T &operator[](exint i) noexcept
{
UT_ASSERT_P(i >= 0 && i < SIZE);
return vec[i];
}
/// Raw pointer to the first component.
SYS_FORCE_INLINE constexpr const T *data() const noexcept
{
return vec;
}
SYS_FORCE_INLINE T *data() noexcept
{
return vec;
}
SYS_FORCE_INLINE ThisType &operator=(const ThisType &that) = default;
SYS_FORCE_INLINE ThisType &operator=(ThisType &&that) = default;
/// Converting assignment: each component of S is assigned to T.
template <typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE ThisType &operator=(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that[i];
return *this;
}
/// Assigns the same scalar value to every component.
SYS_FORCE_INLINE const ThisType &operator=(T that) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that;
return *this;
}
/// Component-wise addition with another vector.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE void operator+=(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] += that[i];
}
/// Adds the scalar to every component.
SYS_FORCE_INLINE void operator+=(T that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] += that;
}
/// Component-wise sum; the result type is the common type of the operands.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE auto operator+(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) const -> UT_FixedVector<decltype(vec[0]+that[0]),SIZE>
{
using Type = decltype(vec[0]+that[0]);
UT_FixedVector<Type,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] + that[i];
return result;
}
/// Component-wise subtraction of another vector.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE void operator-=(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] -= that[i];
}
/// Subtracts the scalar from every component.
SYS_FORCE_INLINE void operator-=(T that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] -= that;
}
/// Component-wise difference; the result type is the common type.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE auto operator-(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) const -> UT_FixedVector<decltype(vec[0]-that[0]),SIZE>
{
using Type = decltype(vec[0]-that[0]);
UT_FixedVector<Type,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] - that[i];
return result;
}
/// Component-wise (Hadamard) multiplication by another vector.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE void operator*=(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] *= that[i];
}
/// Component-wise product; the result type is the common type.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE auto operator*(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) const -> UT_FixedVector<decltype(vec[0]*that[0]),SIZE>
{
using Type = decltype(vec[0]*that[0]);
UT_FixedVector<Type,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] * that[i];
return result;
}
/// Multiplies every component by the scalar.
SYS_FORCE_INLINE void operator*=(T that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] *= that;
}
/// Vector-scalar product; the result stays in T (no precision escalation).
SYS_FORCE_INLINE UT_FixedVector<T,SIZE> operator*(T that) const
{
UT_FixedVector<T,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] * that;
return result;
}
/// Component-wise division by another vector.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE void operator/=(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] /= that[i];
}
/// Component-wise quotient; the result type is the common type.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE auto operator/(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) const -> UT_FixedVector<decltype(vec[0]/that[0]),SIZE>
{
using Type = decltype(vec[0]/that[0]);
UT_FixedVector<Type,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] / that[i];
return result;
}
/// Divides every component by the scalar.
/// For non-integral T, the reciprocal is computed once and multiplied
/// through, trading one division for SIZE multiplications.
SYS_FORCE_INLINE void operator/=(T that)
{
if (std::is_integral<T>::value)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] /= that;
}
else
{
that = 1/that;
for (exint i = 0; i < SIZE; ++i)
vec[i] *= that;
}
}
/// Vector divided by scalar; same reciprocal optimization as operator/=.
SYS_FORCE_INLINE UT_FixedVector<T,SIZE> operator/(T that) const
{
UT_FixedVector<T,SIZE> result;
if (std::is_integral<T>::value)
{
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] / that;
}
else
{
that = 1/that;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] * that;
}
return result;
}
/// Negates every component in place.
SYS_FORCE_INLINE void negate()
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = -vec[i];
}
/// Returns the component-wise negation.
SYS_FORCE_INLINE UT_FixedVector<T,SIZE> operator-() const
{
UT_FixedVector<T,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = -vec[i];
return result;
}
/// Equality: each component of the other vector is converted to T before
/// comparison.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE bool operator==(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) const noexcept
{
for (exint i = 0; i < SIZE; ++i)
{
if (vec[i] != T(that[i]))
return false;
}
return true;
}
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE bool operator!=(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) const noexcept
{
return !(*this==that);
}
/// True iff every component equals zero.
SYS_FORCE_INLINE bool isZero() const noexcept
{
for (exint i = 0; i < SIZE; ++i)
{
if (vec[i] != T(0))
return false;
}
return true;
}
/// Largest component value.
SYS_FORCE_INLINE T maxComponent() const
{
T v = vec[0];
for (exint i = 1; i < SIZE; ++i)
v = (vec[i] > v) ? vec[i] : v;
return v;
}
/// Smallest component value.
SYS_FORCE_INLINE T minComponent() const
{
T v = vec[0];
for (exint i = 1; i < SIZE; ++i)
v = (vec[i] < v) ? vec[i] : v;
return v;
}
/// Average of the components (integer division when T is integral).
SYS_FORCE_INLINE T avgComponent() const
{
T v = vec[0];
for (exint i = 1; i < SIZE; ++i)
v += vec[i];
return v / SIZE;
}
/// Squared Euclidean length (sum of squared components).
SYS_FORCE_INLINE T length2() const noexcept
{
T a0(vec[0]);
T result(a0*a0);
for (exint i = 1; i < SIZE; ++i)
{
T ai(vec[i]);
result += ai*ai;
}
return result;
}
/// Euclidean length: sqrt(length2()).
SYS_FORCE_INLINE T length() const
{
T len2 = length2();
return SYSsqrt(len2);
}
/// Dot product with another vector; result type is the common type.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE auto dot(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) const -> decltype(vec[0]*that[0])
{
using TheType = decltype(vec[0]*that.vec[0]);
TheType result(vec[0]*that[0]);
for (exint i = 1; i < SIZE; ++i)
result += vec[i]*that[i];
return result;
}
/// Squared Euclidean distance to another vector.
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE auto distance2(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) const -> decltype(vec[0]-that[0])
{
using TheType = decltype(vec[0]-that[0]);
TheType v(vec[0] - that[0]);
TheType result(v*v);
for (exint i = 1; i < SIZE; ++i)
{
v = vec[i] - that[i];
result += v*v;
}
return result;
}
/// Euclidean distance to another vector: sqrt(distance2(that)).
template<typename S,bool S_INSTANTIATED>
SYS_FORCE_INLINE auto distance(const UT_FixedVector<S,SIZE,S_INSTANTIATED> &that) const -> decltype(vec[0]-that[0])
{
auto dist2 = distance2(that);
return SYSsqrt(dist2);
}
/// Normalizes this vector in place and returns its previous length.
/// Zero vectors are left unchanged (returns 0); unit vectors are left
/// unchanged (returns 1), avoiding needless rounding.
SYS_FORCE_INLINE T normalize()
{
T len2 = length2();
if (len2 == T(0))
return T(0);
if (len2 == T(1))
return T(1);
T len = SYSsqrt(len2);
// Check if the square root is equal 1. sqrt(1+dx) ~ 1+dx/2,
// so it may get rounded to 1 when it wasn't 1 before.
if (len != T(1))
(*this) /= len;
return len;
}
};
  3080. /// NOTE: Strictly speaking, this should use decltype(that*a[0]),
  3081. /// but in the interests of avoiding accidental precision escalation,
  3082. /// it uses T.
  3083. template<typename T,exint SIZE,bool INSTANTIATED,typename S>
  3084. SYS_FORCE_INLINE UT_FixedVector<T,SIZE> operator*(const S &that,const UT_FixedVector<T,SIZE,INSTANTIATED> &a)
  3085. {
  3086. T t(that);
  3087. UT_FixedVector<T,SIZE> result;
  3088. for (exint i = 0; i < SIZE; ++i)
  3089. result[i] = t * a[i];
  3090. return result;
  3091. }
  3092. template<typename T, exint SIZE, bool INSTANTIATED, typename S, bool S_INSTANTIATED>
  3093. SYS_FORCE_INLINE auto
  3094. dot(const UT_FixedVector<T,SIZE,INSTANTIATED> &a, const UT_FixedVector<S,SIZE,S_INSTANTIATED> &b) -> decltype(a[0]*b[0])
  3095. {
  3096. return a.dot(b);
  3097. }
  3098. template<typename T, exint SIZE, bool INSTANTIATED, typename S, bool S_INSTANTIATED>
  3099. SYS_FORCE_INLINE auto
  3100. SYSmin(const UT_FixedVector<T,SIZE,INSTANTIATED> &a, const UT_FixedVector<S,SIZE,S_INSTANTIATED> &b) -> UT_FixedVector<decltype(a[0]+b[1]), SIZE>
  3101. {
  3102. using Type = decltype(a[0]+b[1]);
  3103. UT_FixedVector<Type, SIZE> result;
  3104. for (exint i = 0; i < SIZE; ++i)
  3105. result[i] = SYSmin(Type(a[i]), Type(b[i]));
  3106. return result;
  3107. }
  3108. template<typename T, exint SIZE, bool INSTANTIATED, typename S, bool S_INSTANTIATED>
  3109. SYS_FORCE_INLINE auto
  3110. SYSmax(const UT_FixedVector<T,SIZE,INSTANTIATED> &a, const UT_FixedVector<S,SIZE,S_INSTANTIATED> &b) -> UT_FixedVector<decltype(a[0]+b[1]), SIZE>
  3111. {
  3112. using Type = decltype(a[0]+b[1]);
  3113. UT_FixedVector<Type, SIZE> result;
  3114. for (exint i = 0; i < SIZE; ++i)
  3115. result[i] = SYSmax(Type(a[i]), Type(b[i]));
  3116. return result;
  3117. }
  3118. template<typename T>
  3119. struct UT_FixedVectorTraits
  3120. {
  3121. typedef UT_FixedVector<T,1> FixedVectorType;
  3122. typedef T DataType;
  3123. static const exint TupleSize = 1;
  3124. static const bool isVectorType = false;
  3125. };
  3126. template<typename T,exint SIZE,bool INSTANTIATED>
  3127. struct UT_FixedVectorTraits<UT_FixedVector<T,SIZE,INSTANTIATED> >
  3128. {
  3129. typedef UT_FixedVector<T,SIZE,INSTANTIATED> FixedVectorType;
  3130. typedef T DataType;
  3131. static const exint TupleSize = SIZE;
  3132. static const bool isVectorType = true;
  3133. };
  3134. }}
  3135. #endif
  3136. /*
  3137. * Copyright (c) 2018 Side Effects Software Inc.
  3138. *
  3139. * Permission is hereby granted, free of charge, to any person obtaining a copy
  3140. * of this software and associated documentation files (the "Software"), to deal
  3141. * in the Software without restriction, including without limitation the rights
  3142. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  3143. * copies of the Software, and to permit persons to whom the Software is
  3144. * furnished to do so, subject to the following conditions:
  3145. *
  3146. * The above copyright notice and this permission notice shall be included in all
  3147. * copies or substantial portions of the Software.
  3148. *
  3149. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  3150. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  3151. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  3152. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  3153. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  3154. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  3155. * SOFTWARE.
  3156. *
  3157. * COMMENTS:
  3158. * Simple wrappers on tbb interface
  3159. */
  3160. #ifndef __UT_ParallelUtil__
  3161. #define __UT_ParallelUtil__
  3162. #include <thread> // This is just included for std::thread::hardware_concurrency()
  3163. namespace igl {
  3164. /// @private
  3165. namespace FastWindingNumber {
  3166. namespace UT_Thread { inline int getNumProcessors() {
  3167. return std::thread::hardware_concurrency();
  3168. }}
  3169. //#include "tbb/blocked_range.h"
  3170. //#include "tbb/parallel_for.h"
  3171. ////namespace tbb { class split; }
  3172. //
  3173. ///// Declare prior to use.
  3174. //template <typename T>
  3175. //using UT_BlockedRange = tbb::blocked_range<T>;
  3176. //
  3177. //// Default implementation that calls range.size()
  3178. //template< typename RANGE >
  3179. //struct UT_EstimatorNumItems
  3180. //{
  3181. // UT_EstimatorNumItems() {}
  3182. //
  3183. // size_t operator()(const RANGE& range) const
  3184. // {
  3185. // return range.size();
  3186. // }
  3187. //};
  3188. //
  3189. ///// This is needed by UT_CoarsenedRange
  3190. //template <typename RANGE>
  3191. //inline size_t UTestimatedNumItems(const RANGE& range)
  3192. //{
  3193. // return UT_EstimatorNumItems<RANGE>()(range);
  3194. //}
  3195. //
  3196. ///// UT_CoarsenedRange: This should be used only inside
  3197. ///// UT_ParallelFor and UT_ParallelReduce
  3198. ///// This class wraps an existing range with a new range.
  3199. ///// This allows us to use simple_partitioner, rather than
  3200. ///// auto_partitioner, which has disastrous performance with
///// the default grain size in tbb 4.
  3202. //template< typename RANGE >
  3203. //class UT_CoarsenedRange : public RANGE
  3204. //{
  3205. //public:
  3206. // // Compiler-generated versions are fine:
  3207. // // ~UT_CoarsenedRange();
  3208. // // UT_CoarsenedRange(const UT_CoarsenedRange&);
  3209. //
  3210. // // Split into two sub-ranges:
  3211. // UT_CoarsenedRange(UT_CoarsenedRange& range, tbb::split spl) :
  3212. // RANGE(range, spl),
  3213. // myGrainSize(range.myGrainSize)
  3214. // {
  3215. // }
  3216. //
  3217. // // Inherited: bool empty() const
  3218. //
  3219. // bool is_divisible() const
  3220. // {
  3221. // return
  3222. // RANGE::is_divisible() &&
  3223. // (UTestimatedNumItems(static_cast<const RANGE&>(*this)) > myGrainSize);
  3224. // }
  3225. //
  3226. //private:
  3227. // size_t myGrainSize;
  3228. //
  3229. // UT_CoarsenedRange(const RANGE& base_range, const size_t grain_size) :
  3230. // RANGE(base_range),
  3231. // myGrainSize(grain_size)
  3232. // {
  3233. // }
  3234. //
  3235. // template <typename Range, typename Body>
  3236. // friend void UTparallelFor(
  3237. // const Range &range, const Body &body,
  3238. // const int subscribe_ratio, const int min_grain_size
  3239. // );
  3240. //};
  3241. //
  3242. ///// Run the @c body function over a range in parallel.
  3243. ///// UTparallelFor attempts to spread the range out over at most
  3244. ///// subscribe_ratio * num_processor tasks.
  3245. ///// The factor subscribe_ratio can be used to help balance the load.
  3246. ///// UTparallelFor() uses tbb for its implementation.
///// The used grain size is the maximum of min_grain_size and
///// UTestimatedNumItems(range) / (subscribe_ratio * num_processor).
///// If subscribe_ratio == 0, then a grain size of min_grain_size will be used.
///// A range can be split only when UTestimatedNumItems(range) exceeds the
///// grain size and the range is divisible.
  3252. //
  3253. /////
  3254. ///// Requirements for the Range functor are:
  3255. ///// - the requirements of the tbb Range Concept
///// - UT_EstimatorNumItems<Range> must return the estimated number of work items
///// for the range. When Range::size() is not the correct estimate, then a
///// (partial) specialization of UT_EstimatorNumItems must be provided
///// for the type Range.
  3260. /////
  3261. ///// Requirements for the Body function are:
  3262. ///// - @code Body(const Body &); @endcode @n
  3263. ///// Copy Constructor
  3264. ///// - @code Body()::~Body(); @endcode @n
  3265. ///// Destructor
  3266. ///// - @code void Body::operator()(const Range &range) const; @endcode
  3267. ///// Function call to perform operation on the range. Note the operator is
  3268. ///// @b const.
  3269. /////
  3270. ///// The requirements for a Range object are:
  3271. ///// - @code Range::Range(const Range&); @endcode @n
  3272. ///// Copy constructor
  3273. ///// - @code Range::~Range(); @endcode @n
  3274. ///// Destructor
  3275. ///// - @code bool Range::is_divisible() const; @endcode @n
  3276. ///// True if the range can be partitioned into two sub-ranges
  3277. ///// - @code bool Range::empty() const; @endcode @n
  3278. ///// True if the range is empty
  3279. ///// - @code Range::Range(Range &r, UT_Split) const; @endcode @n
  3280. ///// Split the range @c r into two sub-ranges (i.e. modify @c r and *this)
  3281. /////
  3282. ///// Example: @code
  3283. ///// class Square {
  3284. ///// public:
  3285. ///// Square(double *data) : myData(data) {}
  3286. ///// ~Square();
  3287. ///// void operator()(const UT_BlockedRange<int64> &range) const
  3288. ///// {
  3289. ///// for (int64 i = range.begin(); i != range.end(); ++i)
  3290. ///// myData[i] *= myData[i];
  3291. ///// }
  3292. ///// double *myData;
  3293. ///// };
  3294. ///// ...
  3295. /////
  3296. ///// void
  3297. ///// parallel_square(double *array, int64 length)
  3298. ///// {
  3299. ///// UTparallelFor(UT_BlockedRange<int64>(0, length), Square(array));
  3300. ///// }
  3301. ///// @endcode
  3302. /////
  3303. ///// @see UTparallelReduce(), UT_BlockedRange()
  3304. //
  3305. //template <typename Range, typename Body>
  3306. //void UTparallelFor(
  3307. // const Range &range, const Body &body,
  3308. // const int subscribe_ratio = 2,
  3309. // const int min_grain_size = 1
  3310. //)
  3311. //{
  3312. // const size_t num_processors( UT_Thread::getNumProcessors() );
  3313. //
  3314. // UT_ASSERT( num_processors >= 1 );
  3315. // UT_ASSERT( min_grain_size >= 1 );
  3316. // UT_ASSERT( subscribe_ratio >= 0 );
  3317. //
  3318. // const size_t est_range_size( UTestimatedNumItems(range) );
  3319. //
  3320. // // Don't run on an empty range!
  3321. // if (est_range_size == 0)
  3322. // return;
  3323. //
  3324. // // Avoid tbb overhead if entire range needs to be single threaded
  3325. // if (num_processors == 1 || est_range_size <= min_grain_size)
  3326. // {
  3327. // body(range);
  3328. // return;
  3329. // }
  3330. //
  3331. // size_t grain_size(min_grain_size);
  3332. // if( subscribe_ratio > 0 )
  3333. // grain_size = std::max(
  3334. // grain_size,
  3335. // est_range_size / (subscribe_ratio * num_processors)
  3336. // );
  3337. //
  3338. // UT_CoarsenedRange< Range > coarsened_range(range, grain_size);
  3339. //
  3340. // tbb::parallel_for(coarsened_range, body, tbb::simple_partitioner());
  3341. //}
  3342. //
  3343. ///// Version of UTparallelFor that is tuned for the case where the range
  3344. ///// consists of lightweight items, for example,
  3345. ///// float additions or matrix-vector multiplications.
  3346. //template <typename Range, typename Body>
  3347. //void
  3348. //UTparallelForLightItems(const Range &range, const Body &body)
  3349. //{
  3350. // UTparallelFor(range, body, 2, 1024);
  3351. //}
  3352. //
  3353. ///// UTserialFor can be used as a debugging tool to quickly replace a parallel
  3354. ///// for with a serial for.
  3355. //template <typename Range, typename Body>
  3356. //void UTserialFor(const Range &range, const Body &body)
  3357. // { body(range); }
  3358. //
  3359. }}
  3360. #endif
  3361. /*
  3362. * Copyright (c) 2018 Side Effects Software Inc.
  3363. *
  3364. * Permission is hereby granted, free of charge, to any person obtaining a copy
  3365. * of this software and associated documentation files (the "Software"), to deal
  3366. * in the Software without restriction, including without limitation the rights
  3367. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  3368. * copies of the Software, and to permit persons to whom the Software is
  3369. * furnished to do so, subject to the following conditions:
  3370. *
  3371. * The above copyright notice and this permission notice shall be included in all
  3372. * copies or substantial portions of the Software.
  3373. *
  3374. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  3375. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  3376. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  3377. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  3378. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  3379. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  3380. * SOFTWARE.
  3381. *
  3382. * COMMENTS:
  3383. * Bounding Volume Hierarchy (BVH) implementation.
  3384. * To call functions not implemented here, also include UT_BVHImpl.h
  3385. */
  3386. #pragma once
  3387. #ifndef __HDK_UT_BVH_h__
  3388. #define __HDK_UT_BVH_h__
  3389. #include <limits>
  3390. #include <memory>
  3391. namespace igl {
  3392. /// @private
  3393. namespace FastWindingNumber {
  3394. template<typename T> class UT_Array;
  3395. class v4uf;
  3396. class v4uu;
  3397. namespace HDK_Sample {
  3398. namespace UT {
  3399. template<typename T,uint NAXES>
  3400. struct Box {
  3401. T vals[NAXES][2];
  3402. SYS_FORCE_INLINE Box() noexcept = default;
  3403. SYS_FORCE_INLINE constexpr Box(const Box &other) noexcept = default;
  3404. SYS_FORCE_INLINE constexpr Box(Box &&other) noexcept = default;
  3405. SYS_FORCE_INLINE Box& operator=(const Box &other) noexcept = default;
  3406. SYS_FORCE_INLINE Box& operator=(Box &&other) noexcept = default;
  3407. template<typename S>
  3408. SYS_FORCE_INLINE Box(const Box<S,NAXES>& other) noexcept {
  3409. static_assert((std::is_pod<Box<T,NAXES>>::value) || !std::is_pod<T>::value,
  3410. "UT::Box should be POD, for better performance in UT_Array, etc.");
  3411. for (uint axis = 0; axis < NAXES; ++axis) {
  3412. vals[axis][0] = T(other.vals[axis][0]);
  3413. vals[axis][1] = T(other.vals[axis][1]);
  3414. }
  3415. }
  3416. template<typename S,bool INSTANTIATED>
  3417. SYS_FORCE_INLINE Box(const UT_FixedVector<S,NAXES,INSTANTIATED>& pt) noexcept {
  3418. for (uint axis = 0; axis < NAXES; ++axis) {
  3419. vals[axis][0] = pt[axis];
  3420. vals[axis][1] = pt[axis];
  3421. }
  3422. }
  3423. template<typename S>
  3424. SYS_FORCE_INLINE Box& operator=(const Box<S,NAXES>& other) noexcept {
  3425. for (uint axis = 0; axis < NAXES; ++axis) {
  3426. vals[axis][0] = T(other.vals[axis][0]);
  3427. vals[axis][1] = T(other.vals[axis][1]);
  3428. }
  3429. return *this;
  3430. }
  3431. SYS_FORCE_INLINE const T* operator[](const size_t axis) const noexcept {
  3432. UT_ASSERT_P(axis < NAXES);
  3433. return vals[axis];
  3434. }
  3435. SYS_FORCE_INLINE T* operator[](const size_t axis) noexcept {
  3436. UT_ASSERT_P(axis < NAXES);
  3437. return vals[axis];
  3438. }
  3439. SYS_FORCE_INLINE void initBounds() noexcept {
  3440. for (uint axis = 0; axis < NAXES; ++axis) {
  3441. vals[axis][0] = std::numeric_limits<T>::max();
  3442. vals[axis][1] = -std::numeric_limits<T>::max();
  3443. }
  3444. }
  3445. /// Copy the source box.
  3446. /// NOTE: This is so that in templated code that may have a Box or a
  3447. /// UT_FixedVector, it can call initBounds and still work.
  3448. SYS_FORCE_INLINE void initBounds(const Box<T,NAXES>& src) noexcept {
  3449. for (uint axis = 0; axis < NAXES; ++axis) {
  3450. vals[axis][0] = src.vals[axis][0];
  3451. vals[axis][1] = src.vals[axis][1];
  3452. }
  3453. }
  3454. /// Initialize with the union of the source boxes.
  3455. /// NOTE: This is so that in templated code that may have Box's or a
  3456. /// UT_FixedVector's, it can call initBounds and still work.
  3457. SYS_FORCE_INLINE void initBoundsUnordered(const Box<T,NAXES>& src0, const Box<T,NAXES>& src1) noexcept {
  3458. for (uint axis = 0; axis < NAXES; ++axis) {
  3459. vals[axis][0] = SYSmin(src0.vals[axis][0], src1.vals[axis][0]);
  3460. vals[axis][1] = SYSmax(src0.vals[axis][1], src1.vals[axis][1]);
  3461. }
  3462. }
  3463. SYS_FORCE_INLINE void combine(const Box<T,NAXES>& src) noexcept {
  3464. for (uint axis = 0; axis < NAXES; ++axis) {
  3465. T& minv = vals[axis][0];
  3466. T& maxv = vals[axis][1];
  3467. const T curminv = src.vals[axis][0];
  3468. const T curmaxv = src.vals[axis][1];
  3469. minv = (minv < curminv) ? minv : curminv;
  3470. maxv = (maxv > curmaxv) ? maxv : curmaxv;
  3471. }
  3472. }
  3473. SYS_FORCE_INLINE void enlargeBounds(const Box<T,NAXES>& src) noexcept {
  3474. combine(src);
  3475. }
  3476. template<typename S,bool INSTANTIATED>
  3477. SYS_FORCE_INLINE
  3478. void initBounds(const UT_FixedVector<S,NAXES,INSTANTIATED>& pt) noexcept {
  3479. for (uint axis = 0; axis < NAXES; ++axis) {
  3480. vals[axis][0] = pt[axis];
  3481. vals[axis][1] = pt[axis];
  3482. }
  3483. }
  3484. template<bool INSTANTIATED>
  3485. SYS_FORCE_INLINE
  3486. void initBounds(const UT_FixedVector<T,NAXES,INSTANTIATED>& min, const UT_FixedVector<T,NAXES,INSTANTIATED>& max) noexcept {
  3487. for (uint axis = 0; axis < NAXES; ++axis) {
  3488. vals[axis][0] = min[axis];
  3489. vals[axis][1] = max[axis];
  3490. }
  3491. }
  3492. template<bool INSTANTIATED>
  3493. SYS_FORCE_INLINE
  3494. void initBoundsUnordered(const UT_FixedVector<T,NAXES,INSTANTIATED>& p0, const UT_FixedVector<T,NAXES,INSTANTIATED>& p1) noexcept {
  3495. for (uint axis = 0; axis < NAXES; ++axis) {
  3496. vals[axis][0] = SYSmin(p0[axis], p1[axis]);
  3497. vals[axis][1] = SYSmax(p0[axis], p1[axis]);
  3498. }
  3499. }
  3500. template<bool INSTANTIATED>
  3501. SYS_FORCE_INLINE
  3502. void enlargeBounds(const UT_FixedVector<T,NAXES,INSTANTIATED>& pt) noexcept {
  3503. for (uint axis = 0; axis < NAXES; ++axis) {
  3504. vals[axis][0] = SYSmin(vals[axis][0], pt[axis]);
  3505. vals[axis][1] = SYSmax(vals[axis][1], pt[axis]);
  3506. }
  3507. }
  3508. SYS_FORCE_INLINE
  3509. UT_FixedVector<T,NAXES> getMin() const noexcept {
  3510. UT_FixedVector<T,NAXES> v;
  3511. for (uint axis = 0; axis < NAXES; ++axis) {
  3512. v[axis] = vals[axis][0];
  3513. }
  3514. return v;
  3515. }
  3516. SYS_FORCE_INLINE
  3517. UT_FixedVector<T,NAXES> getMax() const noexcept {
  3518. UT_FixedVector<T,NAXES> v;
  3519. for (uint axis = 0; axis < NAXES; ++axis) {
  3520. v[axis] = vals[axis][1];
  3521. }
  3522. return v;
  3523. }
  3524. T diameter2() const noexcept {
  3525. T diff = (vals[0][1]-vals[0][0]);
  3526. T sum = diff*diff;
  3527. for (uint axis = 1; axis < NAXES; ++axis) {
  3528. diff = (vals[axis][1]-vals[axis][0]);
  3529. sum += diff*diff;
  3530. }
  3531. return sum;
  3532. }
  3533. T volume() const noexcept {
  3534. T product = (vals[0][1]-vals[0][0]);
  3535. for (uint axis = 1; axis < NAXES; ++axis) {
  3536. product *= (vals[axis][1]-vals[axis][0]);
  3537. }
  3538. return product;
  3539. }
  3540. T half_surface_area() const noexcept {
  3541. if (NAXES==1) {
  3542. // NOTE: Although this should technically be 1,
  3543. // that doesn't make any sense as a heuristic,
  3544. // so we fall back to the "volume" of this box.
  3545. return (vals[0][1]-vals[0][0]);
  3546. }
  3547. if (NAXES==2) {
  3548. const T d0 = (vals[0][1]-vals[0][0]);
  3549. const T d1 = (vals[1][1]-vals[1][0]);
  3550. return d0 + d1;
  3551. }
  3552. if (NAXES==3) {
  3553. const T d0 = (vals[0][1]-vals[0][0]);
  3554. const T d1 = (vals[1][1]-vals[1][0]);
  3555. const T d2 = (vals[2][1]-vals[2][0]);
  3556. return d0*d1 + d1*d2 + d2*d0;
  3557. }
  3558. if (NAXES==4) {
  3559. const T d0 = (vals[0][1]-vals[0][0]);
  3560. const T d1 = (vals[1][1]-vals[1][0]);
  3561. const T d2 = (vals[2][1]-vals[2][0]);
  3562. const T d3 = (vals[3][1]-vals[3][0]);
  3563. // This is just d0d1d2 + d1d2d3 + d2d3d0 + d3d0d1 refactored.
  3564. const T d0d1 = d0*d1;
  3565. const T d2d3 = d2*d3;
  3566. return d0d1*(d2+d3) + d2d3*(d0+d1);
  3567. }
  3568. T sum = 0;
  3569. for (uint skipped_axis = 0; skipped_axis < NAXES; ++skipped_axis) {
  3570. T product = 1;
  3571. for (uint axis = 0; axis < NAXES; ++axis) {
  3572. if (axis != skipped_axis) {
  3573. product *= (vals[axis][1]-vals[axis][0]);
  3574. }
  3575. }
  3576. sum += product;
  3577. }
  3578. return sum;
  3579. }
  3580. T axis_sum() const noexcept {
  3581. T sum = (vals[0][1]-vals[0][0]);
  3582. for (uint axis = 1; axis < NAXES; ++axis) {
  3583. sum += (vals[axis][1]-vals[axis][0]);
  3584. }
  3585. return sum;
  3586. }
  3587. template<bool INSTANTIATED0,bool INSTANTIATED1>
  3588. SYS_FORCE_INLINE void intersect(
  3589. T &box_tmin,
  3590. T &box_tmax,
  3591. const UT_FixedVector<uint,NAXES,INSTANTIATED0> &signs,
  3592. const UT_FixedVector<T,NAXES,INSTANTIATED1> &origin,
  3593. const UT_FixedVector<T,NAXES,INSTANTIATED1> &inverse_direction
  3594. ) const noexcept {
  3595. for (int axis = 0; axis < NAXES; ++axis)
  3596. {
  3597. uint sign = signs[axis];
  3598. T t1 = (vals[axis][sign] - origin[axis]) * inverse_direction[axis];
  3599. T t2 = (vals[axis][sign^1] - origin[axis]) * inverse_direction[axis];
  3600. box_tmin = SYSmax(t1, box_tmin);
  3601. box_tmax = SYSmin(t2, box_tmax);
  3602. }
  3603. }
  3604. SYS_FORCE_INLINE void intersect(const Box& other, Box& dest) const noexcept {
  3605. for (int axis = 0; axis < NAXES; ++axis)
  3606. {
  3607. dest.vals[axis][0] = SYSmax(vals[axis][0], other.vals[axis][0]);
  3608. dest.vals[axis][1] = SYSmin(vals[axis][1], other.vals[axis][1]);
  3609. }
  3610. }
  3611. template<bool INSTANTIATED>
  3612. SYS_FORCE_INLINE T minDistance2(
  3613. const UT_FixedVector<T,NAXES,INSTANTIATED> &p
  3614. ) const noexcept {
  3615. T diff = SYSmax(SYSmax(vals[0][0]-p[0], p[0]-vals[0][1]), T(0.0f));
  3616. T d2 = diff*diff;
  3617. for (int axis = 1; axis < NAXES; ++axis)
  3618. {
  3619. diff = SYSmax(SYSmax(vals[axis][0]-p[axis], p[axis]-vals[axis][1]), T(0.0f));
  3620. d2 += diff*diff;
  3621. }
  3622. return d2;
  3623. }
  3624. template<bool INSTANTIATED>
  3625. SYS_FORCE_INLINE T maxDistance2(
  3626. const UT_FixedVector<T,NAXES,INSTANTIATED> &p
  3627. ) const noexcept {
  3628. T diff = SYSmax(p[0]-vals[0][0], vals[0][1]-p[0]);
  3629. T d2 = diff*diff;
  3630. for (int axis = 1; axis < NAXES; ++axis)
  3631. {
  3632. diff = SYSmax(p[axis]-vals[axis][0], vals[axis][1]-p[axis]);
  3633. d2 += diff*diff;
  3634. }
  3635. return d2;
  3636. }
  3637. };
  3638. /// Used by BVH::init to specify the heuristic to use for choosing between different box splits.
  3639. /// I tried putting this inside the BVH class, but I had difficulty getting it to compile.
/// Split-selection heuristics for BVH::init.
/// NOTE: Enumerator order is part of the interface; do not reorder.
enum class BVH_Heuristic {
    /// Tries to minimize the sum of axis lengths of the boxes.
    /// This is useful for applications where the probability of a box being applicable to a
    /// query is proportional to the "length", e.g. the probability of a random infinite plane
    /// intersecting the box.
    BOX_PERIMETER,
    /// Tries to minimize the "surface area" of the boxes.
    /// In 3D, uses the surface area; in 2D, uses the perimeter; in 1D, uses the axis length.
    /// This is what most applications, e.g. ray tracing, should use, particularly when the
    /// probability of a box being applicable to a query is proportional to the surface "area",
    /// e.g. the probability of a random ray hitting the box.
    ///
    /// NOTE: USE THIS ONE IF YOU ARE UNSURE!
    BOX_AREA,
    /// Tries to minimize the "volume" of the boxes.
    /// Uses the product of all axis lengths as a heuristic, (volume in 3D, area in 2D, length in 1D).
    /// This is useful for applications where the probability of a box being applicable to a
    /// query is proportional to the "volume", e.g. the probability of a random point being inside the box.
    BOX_VOLUME,
    /// Tries to minimize the "radii" of the boxes (i.e. the distance from the centre to a corner).
    /// This is useful for applications where the probability of a box being applicable to a
    /// query is proportional to the distance to the box centre, e.g. the probability of a random
    /// infinite plane being within the "radius" of the centre.
    BOX_RADIUS,
    /// Tries to minimize the squared "radii" of the boxes (i.e. the squared distance from the centre to a corner).
    /// This is useful for applications where the probability of a box being applicable to a
    /// query is proportional to the squared distance to the box centre, e.g. the probability of a random
    /// ray passing within the "radius" of the centre.
    BOX_RADIUS2,
    /// Tries to minimize the cubed "radii" of the boxes (i.e. the cubed distance from the centre to a corner).
    /// This is useful for applications where the probability of a box being applicable to a
    /// query is proportional to the cubed distance to the box centre, e.g. the probability of a random
    /// point being within the "radius" of the centre.
    BOX_RADIUS3,
    /// Tries to minimize the depth of the tree by primarily splitting at the median of the max axis.
    /// It may fall back to minimizing the area, but the tree depth should be unaffected.
    ///
    /// FIXME: This is not fully implemented yet; see unweightedHeuristic, which
    /// asserts that the caller handles MEDIAN_MAX_AXIS separately.
    MEDIAN_MAX_AXIS
};
/// Bounding Volume Hierarchy with N children per node.
/// Construction is done via init(); the implementations of the declared
/// templates live in UT_BVHImpl.h (the second half of this file).
template<uint N>
class BVH {
public:
    using INT_TYPE = uint;
    /// One BVH node: N child slots. Each slot holds either an item index,
    /// an internal-node index tagged with INTERNAL_BIT, or EMPTY.
    /// Because EMPTY is all-ones, it also tests as "internal".
    struct Node {
        INT_TYPE child[N];
        static constexpr INT_TYPE theN = N;
        static constexpr INT_TYPE EMPTY = INT_TYPE(-1);
        // High bit of the index distinguishes internal nodes from items.
        static constexpr INT_TYPE INTERNAL_BIT = (INT_TYPE(1)<<(sizeof(INT_TYPE)*8 - 1));
        /// Tag an internal node number so it can be stored in a child slot.
        SYS_FORCE_INLINE static INT_TYPE markInternal(INT_TYPE internal_node_num) noexcept {
            return internal_node_num | INTERNAL_BIT;
        }
        /// True for internal nodes and for EMPTY slots.
        SYS_FORCE_INLINE static bool isInternal(INT_TYPE node_int) noexcept {
            return (node_int & INTERNAL_BIT) != 0;
        }
        /// Strip the INTERNAL_BIT tag to recover the node index.
        SYS_FORCE_INLINE static INT_TYPE getInternalNum(INT_TYPE node_int) noexcept {
            return node_int & ~INTERNAL_BIT;
        }
    };
private:
    /// Deleter for myRoot; the node array's ownership is stolen from a
    /// UT_Array in init(), so it must be released with free, not delete[].
    struct FreeDeleter {
        SYS_FORCE_INLINE void operator()(Node* p) const {
            if (p) {
                // The pointer was allocated with malloc by UT_Array,
                // so it must be freed with free.
                free(p);
            }
        }
    };
    // Contiguous node array; index 0 is always the root.
    std::unique_ptr<Node[],FreeDeleter> myRoot;
    INT_TYPE myNumNodes;
public:
    SYS_FORCE_INLINE BVH() noexcept : myRoot(nullptr), myNumNodes(0) {}
    /// Build the tree from nboxes boxes, computing the overall bounding box
    /// internally. If indices is null, trivial indices 0..nboxes-1 are used.
    template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE=INT_TYPE>
    inline void init(const BOX_TYPE* boxes, const INT_TYPE nboxes, SRC_INT_TYPE* indices=nullptr, bool reorder_indices=false, INT_TYPE max_items_per_leaf=1) noexcept;
    /// Build the tree when the overall bounding box is already known.
    template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE=INT_TYPE>
    inline void init(Box<T,NAXES> axes_minmax, const BOX_TYPE* boxes, INT_TYPE nboxes, SRC_INT_TYPE* indices=nullptr, bool reorder_indices=false, INT_TYPE max_items_per_leaf=1) noexcept;
    SYS_FORCE_INLINE
    INT_TYPE getNumNodes() const noexcept
    {
        return myNumNodes;
    }
    SYS_FORCE_INLINE
    const Node *getNodes() const noexcept
    {
        return myRoot.get();
    }
    /// Free the node array and reset to an empty tree.
    SYS_FORCE_INLINE
    void clear() noexcept {
        myRoot.reset();
        myNumNodes = 0;
    }
    /// For each node, this effectively does:
    /// LOCAL_DATA local_data[MAX_ORDER];
    /// bool descend = functors.pre(nodei, parent_data);
    /// if (!descend)
    ///     return;
    /// for each child {
    ///     if (isitem(child))
    ///         functors.item(getitemi(child), nodei, local_data[child]);
    ///     else if (isnode(child))
    ///         recurse(getnodei(child), local_data);
    /// }
    /// functors.post(nodei, parent_nodei, data_for_parent, num_children, local_data);
    template<typename LOCAL_DATA,typename FUNCTORS>
    inline void traverse(
        FUNCTORS &functors,
        LOCAL_DATA *data_for_parent=nullptr) const noexcept;
    /// This acts like the traverse function, except if the number of nodes in two subtrees
    /// of a node contain at least parallel_threshold nodes, they may be executed in parallel.
    /// If parallel_threshold is 0, even item_functor may be executed on items in parallel.
    /// NOTE: Make sure that your functors don't depend on the order that they're executed in,
    /// e.g. don't add values from sibling nodes together except in post functor,
    /// else they might have nondeterministic roundoff or miss some values entirely.
    template<typename LOCAL_DATA,typename FUNCTORS>
    inline void traverseParallel(
        INT_TYPE parallel_threshold,
        FUNCTORS &functors,
        LOCAL_DATA *data_for_parent=nullptr) const noexcept;
    /// For each node, this effectively does:
    /// LOCAL_DATA local_data[MAX_ORDER];
    /// uint descend = functors.pre(nodei, parent_data);
    /// if (!descend)
    ///     return;
    /// for each child {
    ///     if (!(descend & (1<<child)))
    ///         continue;
    ///     if (isitem(child))
    ///         functors.item(getitemi(child), nodei, local_data[child]);
    ///     else if (isnode(child))
    ///         recurse(getnodei(child), local_data);
    /// }
    /// functors.post(nodei, parent_nodei, data_for_parent, num_children, local_data);
    template<typename LOCAL_DATA,typename FUNCTORS>
    inline void traverseVector(
        FUNCTORS &functors,
        LOCAL_DATA *data_for_parent=nullptr) const noexcept;
    /// Prints a text representation of the tree to stdout.
    inline void debugDump() const;
    /// Fill indices with 0, 1, ..., n-1.
    template<typename SRC_INT_TYPE>
    static inline void createTrivialIndices(SRC_INT_TYPE* indices, const INT_TYPE n) noexcept;
private:
    template<typename LOCAL_DATA,typename FUNCTORS>
    inline void traverseHelper(
        INT_TYPE nodei,
        INT_TYPE parent_nodei,
        FUNCTORS &functors,
        LOCAL_DATA *data_for_parent=nullptr) const noexcept;
    template<typename LOCAL_DATA,typename FUNCTORS>
    inline void traverseParallelHelper(
        INT_TYPE nodei,
        INT_TYPE parent_nodei,
        INT_TYPE parallel_threshold,
        INT_TYPE next_node_id,
        FUNCTORS &functors,
        LOCAL_DATA *data_for_parent=nullptr) const noexcept;
    template<typename LOCAL_DATA,typename FUNCTORS>
    inline void traverseVectorHelper(
        INT_TYPE nodei,
        INT_TYPE parent_nodei,
        FUNCTORS &functors,
        LOCAL_DATA *data_for_parent=nullptr) const noexcept;
    /// Compute the bounding box of all boxes referenced by indices
    /// (or of boxes[0..nboxes) if indices is null).
    template<typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
    static inline void computeFullBoundingBox(Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, const INT_TYPE nboxes, SRC_INT_TYPE* indices) noexcept;
    template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
    static inline void initNode(UT_Array<Node>& nodes, Node &node, const Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, SRC_INT_TYPE* indices, const INT_TYPE nboxes) noexcept;
    template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
    static inline void initNodeReorder(UT_Array<Node>& nodes, Node &node, const Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, SRC_INT_TYPE* indices, const INT_TYPE nboxes, const INT_TYPE indices_offset, const INT_TYPE max_items_per_leaf) noexcept;
    /// Partition indices into N groups, one per child, with their boxes.
    template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
    static inline void multiSplit(const Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, SRC_INT_TYPE* indices, INT_TYPE nboxes, SRC_INT_TYPE* sub_indices[N+1], Box<T,NAXES> sub_boxes[N]) noexcept;
    /// Binary split of indices according to heuristic H.
    template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
    static inline void split(const Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, SRC_INT_TYPE* indices, INT_TYPE nboxes, SRC_INT_TYPE*& split_indices, Box<T,NAXES>* split_boxes) noexcept;
    template<INT_TYPE PARALLEL_THRESHOLD, typename SRC_INT_TYPE>
    static inline void adjustParallelChildNodes(INT_TYPE nparallel, UT_Array<Node>& nodes, Node& node, UT_Array<Node>* parallel_nodes, SRC_INT_TYPE* sub_indices) noexcept;
    template<typename T,typename BOX_TYPE,typename SRC_INT_TYPE>
    static inline void nthElement(const BOX_TYPE* boxes, SRC_INT_TYPE* indices, const SRC_INT_TYPE* indices_end, const uint axis, SRC_INT_TYPE*const nth) noexcept;
    template<typename T,typename BOX_TYPE,typename SRC_INT_TYPE>
    static inline void partitionByCentre(const BOX_TYPE* boxes, SRC_INT_TYPE*const indices, const SRC_INT_TYPE*const indices_end, const uint axis, const T pivotx2, SRC_INT_TYPE*& ppivot_start, SRC_INT_TYPE*& ppivot_end) noexcept;
    /// An overestimate of the number of nodes needed.
    /// At worst, we could have only 2 children in every leaf, and
    /// then above that, we have a geometric series with r=1/N and a=(sub_nboxes/2)/N
    /// The true worst case might be a little worst than this, but
    /// it's probably fairly unlikely.
    SYS_FORCE_INLINE static INT_TYPE nodeEstimate(const INT_TYPE nboxes) noexcept {
        return nboxes/2 + nboxes/(2*(N-1));
    }
    /// Evaluate heuristic H on a box, ignoring the item-count weighting.
    /// MEDIAN_MAX_AXIS must be handled by the caller (asserts otherwise).
    template<BVH_Heuristic H,typename T, uint NAXES>
    SYS_FORCE_INLINE static T unweightedHeuristic(const Box<T, NAXES>& box) noexcept {
        if (H == BVH_Heuristic::BOX_PERIMETER) {
            return box.axis_sum();
        }
        if (H == BVH_Heuristic::BOX_AREA) {
            return box.half_surface_area();
        }
        if (H == BVH_Heuristic::BOX_VOLUME) {
            return box.volume();
        }
        if (H == BVH_Heuristic::BOX_RADIUS) {
            T diameter2 = box.diameter2();
            return SYSsqrt(diameter2);
        }
        if (H == BVH_Heuristic::BOX_RADIUS2) {
            return box.diameter2();
        }
        if (H == BVH_Heuristic::BOX_RADIUS3) {
            T diameter2 = box.diameter2();
            return diameter2*SYSsqrt(diameter2);
        }
        UT_ASSERT_MSG(0, "BVH_Heuristic::MEDIAN_MAX_AXIS should be handled separately by caller!");
        return T(1);
    }
    /// 16 equal-length spans (15 evenly-spaced splits) should be enough for a decent heuristic
    static constexpr INT_TYPE NSPANS = 16;
    static constexpr INT_TYPE NSPLITS = NSPANS-1;
    /// At least 1/16 of all boxes must be on each side, else we could end up with a very deep tree
    static constexpr INT_TYPE MIN_FRACTION = 16;
};
  3857. } // UT namespace
/// Convenience alias exposing UT::BVH<N> at HDK_Sample scope.
template<uint N>
using UT_BVH = UT::BVH<N>;
  3860. } // End HDK_Sample namespace
  3861. }}
  3862. #endif
  3863. /*
  3864. * Copyright (c) 2018 Side Effects Software Inc.
  3865. *
  3866. * Permission is hereby granted, free of charge, to any person obtaining a copy
  3867. * of this software and associated documentation files (the "Software"), to deal
  3868. * in the Software without restriction, including without limitation the rights
  3869. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  3870. * copies of the Software, and to permit persons to whom the Software is
  3871. * furnished to do so, subject to the following conditions:
  3872. *
  3873. * The above copyright notice and this permission notice shall be included in all
  3874. * copies or substantial portions of the Software.
  3875. *
  3876. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  3877. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  3878. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  3879. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  3880. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  3881. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  3882. * SOFTWARE.
  3883. *
  3884. * COMMENTS:
  3885. * Bounding Volume Hierarchy (BVH) implementation.
  3886. * The main file is UT_BVH.h; this file is separate so that
  3887. * files that don't actually need to call functions on the BVH
  3888. * won't have unnecessary headers and functions included.
  3889. */
  3890. #pragma once
  3891. #ifndef __HDK_UT_BVHImpl_h__
  3892. #define __HDK_UT_BVHImpl_h__
  3893. #include "parallel_for.h"
  3894. #include <iostream>
  3895. #include <algorithm>
  3896. namespace igl {
  3897. /// @private
  3898. namespace FastWindingNumber {
  3899. namespace HDK_Sample {
  3900. namespace UT {
  3901. template<typename T,uint NAXES>
  3902. SYS_FORCE_INLINE bool utBoxExclude(const UT::Box<T,NAXES>& box) noexcept {
  3903. bool has_nan_or_inf = !SYSisFinite(box[0][0]);
  3904. has_nan_or_inf |= !SYSisFinite(box[0][1]);
  3905. for (uint axis = 1; axis < NAXES; ++axis)
  3906. {
  3907. has_nan_or_inf |= !SYSisFinite(box[axis][0]);
  3908. has_nan_or_inf |= !SYSisFinite(box[axis][1]);
  3909. }
  3910. return has_nan_or_inf;
  3911. }
  3912. template<uint NAXES>
  3913. SYS_FORCE_INLINE bool utBoxExclude(const UT::Box<fpreal32,NAXES>& box) noexcept {
  3914. const int32 *pboxints = reinterpret_cast<const int32*>(&box);
  3915. // Fast check for NaN or infinity: check if exponent bits are 0xFF.
  3916. bool has_nan_or_inf = ((pboxints[0] & 0x7F800000) == 0x7F800000);
  3917. has_nan_or_inf |= ((pboxints[1] & 0x7F800000) == 0x7F800000);
  3918. for (uint axis = 1; axis < NAXES; ++axis)
  3919. {
  3920. has_nan_or_inf |= ((pboxints[2*axis] & 0x7F800000) == 0x7F800000);
  3921. has_nan_or_inf |= ((pboxints[2*axis + 1] & 0x7F800000) == 0x7F800000);
  3922. }
  3923. return has_nan_or_inf;
  3924. }
/// Returns twice the centre of the box along the given axis (min + max,
/// without the divide by 2); ut_BoxCentre<Box>::scale == 2 records the
/// factor so callers can compare consistently without a multiply per box.
template<typename T,uint NAXES>
SYS_FORCE_INLINE T utBoxCenter(const UT::Box<T,NAXES>& box, uint axis) noexcept {
    const T* v = box.vals[axis];
    return v[0] + v[1];
}
/// Traits: factor by which utBoxCenter's result is scaled relative to the
/// true centre. 2 for boxes (utBoxCenter returns min+max).
template<typename T>
struct ut_BoxCentre {
    constexpr static uint scale = 2;
};
  3934. template<typename T,uint NAXES,bool INSTANTIATED>
  3935. SYS_FORCE_INLINE T utBoxExclude(const UT_FixedVector<T,NAXES,INSTANTIATED>& position) noexcept {
  3936. bool has_nan_or_inf = !SYSisFinite(position[0]);
  3937. for (uint axis = 1; axis < NAXES; ++axis)
  3938. has_nan_or_inf |= !SYSisFinite(position[axis]);
  3939. return has_nan_or_inf;
  3940. }
  3941. template<uint NAXES,bool INSTANTIATED>
  3942. SYS_FORCE_INLINE bool utBoxExclude(const UT_FixedVector<fpreal32,NAXES,INSTANTIATED>& position) noexcept {
  3943. const int32 *ppositionints = reinterpret_cast<const int32*>(&position);
  3944. // Fast check for NaN or infinity: check if exponent bits are 0xFF.
  3945. bool has_nan_or_inf = ((ppositionints[0] & 0x7F800000) == 0x7F800000);
  3946. for (uint axis = 1; axis < NAXES; ++axis)
  3947. has_nan_or_inf |= ((ppositionints[axis] & 0x7F800000) == 0x7F800000);
  3948. return has_nan_or_inf;
  3949. }
/// Point overload of utBoxCenter: a point's "centre" is the coordinate
/// itself (scale factor 1; see ut_BoxCentre specialization below).
template<typename T,uint NAXES,bool INSTANTIATED>
SYS_FORCE_INLINE T utBoxCenter(const UT_FixedVector<T,NAXES,INSTANTIATED>& position, uint axis) noexcept {
    return position[axis];
}
/// Traits specialization for points: utBoxCenter returns the coordinate
/// unscaled, so the scale factor is 1.
template<typename T,uint NAXES,bool INSTANTIATED>
struct ut_BoxCentre<UT_FixedVector<T,NAXES,INSTANTIATED>> {
    constexpr static uint scale = 1;
};
  3958. template<typename BOX_TYPE,typename SRC_INT_TYPE,typename INT_TYPE>
  3959. inline INT_TYPE utExcludeNaNInfBoxIndices(const BOX_TYPE* boxes, SRC_INT_TYPE* indices, INT_TYPE& nboxes) noexcept
  3960. {
  3961. constexpr INT_TYPE PARALLEL_THRESHOLD = 65536;
  3962. INT_TYPE ntasks = 1;
  3963. //if (nboxes >= PARALLEL_THRESHOLD)
  3964. //{
  3965. // INT_TYPE nprocessors = UT_Thread::getNumProcessors();
  3966. // ntasks = (nprocessors > 1) ? SYSmin(4*nprocessors, nboxes/(PARALLEL_THRESHOLD/2)) : 1;
  3967. //}
  3968. //if (ntasks == 1)
  3969. {
  3970. // Serial: easy case; just loop through.
  3971. const SRC_INT_TYPE* indices_end = indices + nboxes;
  3972. // Loop through forward once
  3973. SRC_INT_TYPE* psrc_index = indices;
  3974. for (; psrc_index != indices_end; ++psrc_index)
  3975. {
  3976. const bool exclude = utBoxExclude(boxes[*psrc_index]);
  3977. if (exclude)
  3978. break;
  3979. }
  3980. if (psrc_index == indices_end)
  3981. return 0;
  3982. // First NaN or infinite box
  3983. SRC_INT_TYPE* nan_start = psrc_index;
  3984. for (++psrc_index; psrc_index != indices_end; ++psrc_index)
  3985. {
  3986. const bool exclude = utBoxExclude(boxes[*psrc_index]);
  3987. if (!exclude)
  3988. {
  3989. *nan_start = *psrc_index;
  3990. ++nan_start;
  3991. }
  3992. }
  3993. nboxes = nan_start-indices;
  3994. return indices_end - nan_start;
  3995. }
  3996. }
/// Convenience overload of init: computes the overall bounding box of all
/// the input boxes, then delegates to the main init overload below.
template<uint N>
template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
inline void BVH<N>::init(const BOX_TYPE* boxes, const INT_TYPE nboxes, SRC_INT_TYPE* indices, bool reorder_indices, INT_TYPE max_items_per_leaf) noexcept {
    Box<T,NAXES> axes_minmax;
    computeFullBoundingBox(axes_minmax, boxes, nboxes, indices);
    init<H>(axes_minmax, boxes, nboxes, indices, reorder_indices, max_items_per_leaf);
}
/// Builds the BVH over nboxes boxes using heuristic H, given the overall
/// bounding box. If indices is null, trivial indices 0..nboxes-1 are
/// generated locally. Boxes containing NaN/infinity are excluded first.
/// On return, myRoot owns a compact node array and myNumNodes is its size.
template<uint N>
template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
inline void BVH<N>::init(Box<T,NAXES> axes_minmax, const BOX_TYPE* boxes, INT_TYPE nboxes, SRC_INT_TYPE* indices, bool reorder_indices, INT_TYPE max_items_per_leaf) noexcept {
    // Clear the tree in advance to save memory.
    myRoot.reset();
    if (nboxes == 0) {
        myNumNodes = 0;
        return;
    }
    UT_Array<INT_TYPE> local_indices;
    if (!indices) {
        local_indices.setSizeNoInit(nboxes);
        indices = local_indices.array();
        createTrivialIndices(indices, nboxes);
    }
    // Exclude any boxes with NaNs or infinities by shifting down indices
    // over the bad box indices and updating nboxes.
    INT_TYPE nexcluded = utExcludeNaNInfBoxIndices(boxes, indices, nboxes);
    if (nexcluded != 0) {
        if (nboxes == 0) {
            myNumNodes = 0;
            return;
        }
        // The overall bounds may have been determined by an excluded box,
        // so recompute them over the surviving boxes only.
        computeFullBoundingBox(axes_minmax, boxes, nboxes, indices);
    }
    UT_Array<Node> nodes;
    // Preallocate an overestimate of the number of nodes needed.
    nodes.setCapacity(nodeEstimate(nboxes));
    nodes.setSize(1);
    if (reorder_indices)
        initNodeReorder<H>(nodes, nodes[0], axes_minmax, boxes, indices, nboxes, 0, max_items_per_leaf);
    else
        initNode<H>(nodes, nodes[0], axes_minmax, boxes, indices, nboxes);
    // If capacity is more than 12.5% over the size, reallocate.
    if (8*nodes.capacity() > 9*nodes.size()) {
        nodes.setCapacity(nodes.size());
    }
    // Steal ownership of the array from the UT_Array.
    // myRoot's FreeDeleter uses free(), matching UT_Array's malloc.
    myRoot.reset(nodes.array());
    myNumNodes = nodes.size();
    nodes.unsafeClearData();
}
/// Serial depth-first traversal entry point; see the declaration in
/// UT_BVH.h for the pre/item/post functor contract. No-op on an empty tree.
template<uint N>
template<typename LOCAL_DATA,typename FUNCTORS>
inline void BVH<N>::traverse(
    FUNCTORS &functors,
    LOCAL_DATA* data_for_parent) const noexcept
{
    if (!myRoot)
        return;
    // NOTE: The root is always index 0.
    traverseHelper(0, INT_TYPE(-1), functors, data_for_parent);
}
/// Recursive worker for traverse(): calls functors.pre on this node,
/// then visits each child slot in order (functors.item for items,
/// recursion for internal nodes), then calls functors.post with the
/// per-child local_data and the count of non-empty children.
template<uint N>
template<typename LOCAL_DATA,typename FUNCTORS>
inline void BVH<N>::traverseHelper(
    INT_TYPE nodei,
    INT_TYPE parent_nodei,
    FUNCTORS &functors,
    LOCAL_DATA* data_for_parent) const noexcept
{
    const Node &node = myRoot[nodei];
    bool descend = functors.pre(nodei, data_for_parent);
    if (!descend)
        return;
    LOCAL_DATA local_data[N];
    INT_TYPE s;
    for (s = 0; s < N; ++s) {
        const INT_TYPE node_int = node.child[s];
        // EMPTY has the internal bit set, so check isInternal first.
        if (Node::isInternal(node_int)) {
            if (node_int == Node::EMPTY) {
                // NOTE: Anything after this will be empty too, so we can break.
                break;
            }
            traverseHelper(Node::getInternalNum(node_int), nodei, functors, &local_data[s]);
        }
        else {
            functors.item(node_int, nodei, local_data[s]);
        }
    }
    // NOTE: s is now the number of non-empty entries in this node.
    functors.post(nodei, parent_nodei, data_for_parent, s, local_data);
}
/// Parallel traversal entry point; subtrees with at least
/// parallel_threshold nodes may be visited concurrently (see the
/// declaration in UT_BVH.h for ordering caveats). No-op on an empty tree.
template<uint N>
template<typename LOCAL_DATA,typename FUNCTORS>
inline void BVH<N>::traverseParallel(
    INT_TYPE parallel_threshold,
    FUNCTORS& functors,
    LOCAL_DATA* data_for_parent) const noexcept
{
    if (!myRoot)
        return;
    // NOTE: The root is always index 0. myNumNodes bounds the root's
    // subtree, letting the helper size child subtrees by node-ID ranges.
    traverseParallelHelper(0, INT_TYPE(-1), parallel_threshold, myNumNodes, functors, data_for_parent);
}
template<uint N>
template<typename LOCAL_DATA,typename FUNCTORS>
inline void BVH<N>::traverseParallelHelper(
    INT_TYPE nodei,
    INT_TYPE parent_nodei,
    INT_TYPE parallel_threshold,
    INT_TYPE next_node_id,
    FUNCTORS& functors,
    LOCAL_DATA* data_for_parent) const noexcept
{
    // Recursive worker for traverseParallel(): visits the subtree rooted at
    // nodei.  Children whose subtrees contain at least parallel_threshold
    // nodes are dispatched as parallel tasks when there are 2 or more of
    // them; all other children are traversed serially via traverseHelper().
    // next_node_id is the ID of the first node *after* this subtree; it is
    // used to compute per-child subtree sizes (see below).
    const Node &node = myRoot[nodei];
    bool descend = functors.pre(nodei, data_for_parent);
    if (!descend)
        return;
    // To determine the number of nodes in a child's subtree, we take the next
    // node ID minus the current child's node ID.
    INT_TYPE next_nodes[N];   // per child: ID of the first node after its subtree
    INT_TYPE nnodes[N];       // per child: subtree node count (0 for leaf items)
    INT_TYPE nchildren = N;   // count of non-empty child slots
    INT_TYPE nparallel = 0;   // count of children large enough to parallelize
    // s is currently unsigned, so we check s < N for bounds check.
    // The s >= 0 check is in case s ever becomes signed, and should be
    // automatically removed by the compiler for unsigned s.
    // NOTE: Iterating backward lets next_node_id be walked from the end of
    //       this node's range toward the beginning.
    for (INT_TYPE s = N-1; (std::is_signed<INT_TYPE>::value ? (s >= 0) : (s < N)); --s) {
        const INT_TYPE node_int = node.child[s];
        if (node_int == Node::EMPTY) {
            --nchildren;
            continue;
        }
        next_nodes[s] = next_node_id;
        if (Node::isInternal(node_int)) {
            // NOTE: This depends on BVH<N>::initNode appending the child nodes
            // in between their content, instead of all at once.
            INT_TYPE child_node_id = Node::getInternalNum(node_int);
            nnodes[s] = next_node_id - child_node_id;
            next_node_id = child_node_id;
        }
        else {
            // Leaf item: contributes no subtree nodes.
            nnodes[s] = 0;
        }
        nparallel += (nnodes[s] >= parallel_threshold);
    }
    LOCAL_DATA local_data[N];
    if (nparallel >= 2) {
        // Do any non-parallel ones first
        if (nparallel < nchildren) {
            for (INT_TYPE s = 0; s < N; ++s) {
                // Skip the large subtrees; they run in the parallel loop below.
                if (nnodes[s] >= parallel_threshold) {
                    continue;
                }
                const INT_TYPE node_int = node.child[s];
                if (Node::isInternal(node_int)) {
                    if (node_int == Node::EMPTY) {
                        // NOTE: Anything after this will be empty too, so we can break.
                        break;
                    }
                    traverseHelper(Node::getInternalNum(node_int), nodei, functors, &local_data[s]);
                }
                else {
                    functors.item(node_int, nodei, local_data[s]);
                }
            }
        }
        // Now do the parallel ones
        igl::parallel_for(
            nparallel,
            [this,nodei,&node,&nnodes,&next_nodes,&parallel_threshold,&functors,&local_data](int taski)
        {
            // Map taski to the (taski+1)-th child whose subtree met the
            // parallel threshold.
            INT_TYPE parallel_count = 0;
            // NOTE: The check for s < N is just so that the compiler can
            // (hopefully) figure out that it can fully unroll the loop.
            INT_TYPE s;
            for (s = 0; s < N; ++s) {
                if (nnodes[s] < parallel_threshold) {
                    continue;
                }
                if (parallel_count == taski) {
                    break;
                }
                ++parallel_count;
            }
            const INT_TYPE node_int = node.child[s];
            if (Node::isInternal(node_int)) {
                UT_ASSERT_MSG_P(node_int != Node::EMPTY, "Empty entries should have been excluded above.");
                traverseParallelHelper(Node::getInternalNum(node_int), nodei, parallel_threshold, next_nodes[s], functors, &local_data[s]);
            }
            else {
                functors.item(node_int, nodei, local_data[s]);
            }
        });
    }
    else {
        // All in serial
        for (INT_TYPE s = 0; s < N; ++s) {
            const INT_TYPE node_int = node.child[s];
            if (Node::isInternal(node_int)) {
                if (node_int == Node::EMPTY) {
                    // NOTE: Anything after this will be empty too, so we can break.
                    break;
                }
                traverseHelper(Node::getInternalNum(node_int), nodei, functors, &local_data[s]);
            }
            else {
                functors.item(node_int, nodei, local_data[s]);
            }
        }
    }
    // nchildren counts the non-empty slots, regardless of traversal order.
    functors.post(nodei, parent_nodei, data_for_parent, nchildren, local_data);
}
  4208. template<uint N>
  4209. template<typename LOCAL_DATA,typename FUNCTORS>
  4210. inline void BVH<N>::traverseVector(
  4211. FUNCTORS &functors,
  4212. LOCAL_DATA* data_for_parent) const noexcept
  4213. {
  4214. if (!myRoot)
  4215. return;
  4216. // NOTE: The root is always index 0.
  4217. traverseVectorHelper(0, INT_TYPE(-1), functors, data_for_parent);
  4218. }
template<uint N>
template<typename LOCAL_DATA,typename FUNCTORS>
inline void BVH<N>::traverseVectorHelper(
    INT_TYPE nodei,
    INT_TYPE parent_nodei,
    FUNCTORS &functors,
    LOCAL_DATA* data_for_parent) const noexcept
{
    // Recursive traversal where functors.pre() returns a bitmask rather than
    // a bool: bit s of `descend` selects whether child slot s is visited.
    // A zero mask prunes the whole subtree.
    const Node &node = myRoot[nodei];
    INT_TYPE descend = functors.pre(nodei, data_for_parent);
    if (!descend)
        return;
    LOCAL_DATA local_data[N];
    INT_TYPE s;
    for (s = 0; s < N; ++s) {
        if ((descend>>s) & 1) {
            const INT_TYPE node_int = node.child[s];
            if (Node::isInternal(node_int)) {
                if (node_int == Node::EMPTY) {
                    // NOTE: Anything after this will be empty too, so we can break.
                    // Keep only the bits below s so post() never sees descend
                    // bits for slots that were not visited.
                    descend &= (INT_TYPE(1)<<s)-1;
                    break;
                }
                traverseVectorHelper(Node::getInternalNum(node_int), nodei, functors, &local_data[s]);
            }
            else {
                // Leaf: node_int is the item index.
                functors.item(node_int, nodei, local_data[s]);
            }
        }
    }
    // NOTE: s is now the number of non-empty entries in this node.
    functors.post(nodei, parent_nodei, data_for_parent, s, local_data, descend);
}
  4252. template<uint N>
  4253. template<typename SRC_INT_TYPE>
  4254. inline void BVH<N>::createTrivialIndices(SRC_INT_TYPE* indices, const INT_TYPE n) noexcept {
  4255. igl::parallel_for(n, [indices,n](INT_TYPE i) { indices[i] = i; }, 65536);
  4256. }
template<uint N>
template<typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
inline void BVH<N>::computeFullBoundingBox(Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, const INT_TYPE nboxes, SRC_INT_TYPE* indices) noexcept {
    // Computes into axes_minmax the combined bound of nboxes boxes:
    // boxes[indices[i]] when indices is non-null, otherwise boxes[i].
    if (!nboxes) {
        // Empty input: reset to the initial (empty) bound.
        axes_minmax.initBounds();
        return;
    }
    // Only parallelize for >= 8192 boxes, capping tasks at 4 per processor
    // and keeping at least 4096 boxes per task.
    INT_TYPE ntasks = 1;
    if (nboxes >= 2*4096) {
        INT_TYPE nprocessors = UT_Thread::getNumProcessors();
        ntasks = (nprocessors > 1) ? SYSmin(4*nprocessors, nboxes/4096) : 1;
    }
    if (ntasks == 1) {
        // Serial path: seed from the first box, fold in the rest.
        Box<T,NAXES> box;
        if (indices) {
            box.initBounds(boxes[indices[0]]);
            for (INT_TYPE i = 1; i < nboxes; ++i) {
                box.combine(boxes[indices[i]]);
            }
        }
        else {
            box.initBounds(boxes[0]);
            for (INT_TYPE i = 1; i < nboxes; ++i) {
                box.combine(boxes[i]);
            }
        }
        axes_minmax = box;
    }
    else {
        // Parallel path: one partial bound per worker, reduced at the end.
        UT_SmallArray<Box<T,NAXES>> parallel_boxes;
        Box<T,NAXES> box;
        igl::parallel_for(
            nboxes,
            // prep: size the per-worker partial array (n = worker count).
            [&parallel_boxes](int n){parallel_boxes.setSize(n);},
            // body: fold box i into worker t's partial bound.
            // NOTE(review): parallel_boxes[t] is never explicitly
            // initBounds()'d before its first combine(); this assumes the
            // default-constructed Box state is safe to combine into --
            // confirm against Box's default constructor.
            [&parallel_boxes,indices,&boxes](int i, int t)
        {
            if(indices)
            {
                parallel_boxes[t].combine(boxes[indices[i]]);
            }else
            {
                parallel_boxes[t].combine(boxes[i]);
            }
        },
            // accum: called per worker id t in order; reduce into box.
            [&parallel_boxes,&box](int t)
        {
            if(t == 0)
            {
                box = parallel_boxes[0];
            }else
            {
                box.combine(parallel_boxes[t]);
            }
        });
        axes_minmax = box;
    }
}
template<uint N>
template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
inline void BVH<N>::initNode(UT_Array<Node>& nodes, Node &node, const Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, SRC_INT_TYPE* indices, const INT_TYPE nboxes) noexcept {
    // Builds `node` over boxes[indices[0..nboxes)], appending any descendant
    // nodes to `nodes`.  The index range is partitioned into up to N groups
    // with split/multiSplit; each multi-box group becomes an internal child
    // built recursively (in parallel when at least 2 groups reach
    // PARALLEL_THRESHOLD boxes).
    if (nboxes <= N) {
        // Fits in one node
        for (INT_TYPE i = 0; i < nboxes; ++i) {
            node.child[i] = indices[i];
        }
        // Remaining slots are explicitly empty.
        for (INT_TYPE i = nboxes; i < N; ++i) {
            node.child[i] = Node::EMPTY;
        }
        return;
    }
    // sub_indices[i]..sub_indices[i+1] delimits group i; sub_boxes[i] bounds it.
    SRC_INT_TYPE* sub_indices[N+1];
    Box<T,NAXES> sub_boxes[N];
    if (N == 2) {
        sub_indices[0] = indices;
        sub_indices[2] = indices+nboxes;
        split<H>(axes_minmax, boxes, indices, nboxes, sub_indices[1], &sub_boxes[0]);
    }
    else {
        multiSplit<H>(axes_minmax, boxes, indices, nboxes, sub_indices, sub_boxes);
    }
    // Count the number of nodes to run in parallel and fill in single items in this node
    INT_TYPE nparallel = 0;
    static constexpr INT_TYPE PARALLEL_THRESHOLD = 1024;
    for (INT_TYPE i = 0; i < N; ++i) {
        INT_TYPE sub_nboxes = sub_indices[i+1]-sub_indices[i];
        if (sub_nboxes == 1) {
            // Single box: store the item index directly in the child slot.
            node.child[i] = sub_indices[i][0];
        }
        else if (sub_nboxes >= PARALLEL_THRESHOLD) {
            ++nparallel;
        }
    }
    // NOTE: Child nodes of this node need to be placed just before the nodes in
    // their corresponding subtree, in between the subtrees, because
    // traverseParallel uses the difference between the child node IDs
    // to determine the number of nodes in the subtree.
    // Recurse
    if (nparallel >= 2) {
        // Build each large subtree into its own local array in parallel,
        // then splice them into `nodes` with adjusted node IDs.
        UT_SmallArray<UT_Array<Node>> parallel_nodes;
        UT_SmallArray<Node> parallel_parent_nodes;
        parallel_nodes.setSize(nparallel);
        parallel_parent_nodes.setSize(nparallel);
        igl::parallel_for(
            nparallel,
            [&parallel_nodes,&parallel_parent_nodes,&sub_indices,boxes,&sub_boxes](int taski)
        {
            // First, find which child this is
            INT_TYPE counted_parallel = 0;
            INT_TYPE sub_nboxes;
            INT_TYPE childi;
            for (childi = 0; childi < N; ++childi) {
                sub_nboxes = sub_indices[childi+1]-sub_indices[childi];
                if (sub_nboxes >= PARALLEL_THRESHOLD) {
                    if (counted_parallel == taski) {
                        break;
                    }
                    ++counted_parallel;
                }
            }
            UT_ASSERT_P(counted_parallel == taski);
            UT_Array<Node>& local_nodes = parallel_nodes[taski];
            // Preallocate an overestimate of the number of nodes needed.
            // At worst, we could have only 2 children in every leaf, and
            // then above that, we have a geometric series with r=1/N and a=(sub_nboxes/2)/N
            // The true worst case might be a little worst than this, but
            // it's probably fairly unlikely.
            local_nodes.setCapacity(nodeEstimate(sub_nboxes));
            Node& parent_node = parallel_parent_nodes[taski];
            // We'll have to fix the internal node numbers in parent_node and local_nodes later
            initNode<H>(local_nodes, parent_node, sub_boxes[childi], boxes, sub_indices[childi], sub_nboxes);
        });
        // Serial pass: assign node IDs and splice results into `nodes`.
        INT_TYPE counted_parallel = 0;
        for (INT_TYPE i = 0; i < N; ++i) {
            INT_TYPE sub_nboxes = sub_indices[i+1]-sub_indices[i];
            if (sub_nboxes != 1) {
                INT_TYPE local_nodes_start = nodes.size();
                node.child[i] = Node::markInternal(local_nodes_start);
                if (sub_nboxes >= PARALLEL_THRESHOLD) {
                    // First, adjust the root child node
                    Node child_node = parallel_parent_nodes[counted_parallel];
                    ++local_nodes_start;
                    for (INT_TYPE childi = 0; childi < N; ++childi) {
                        INT_TYPE child_child = child_node.child[childi];
                        // NOTE: EMPTY also tests true for isInternal, so it
                        // must be excluded from the offset adjustment.
                        if (Node::isInternal(child_child) && child_child != Node::EMPTY) {
                            child_child += local_nodes_start;
                            child_node.child[childi] = child_child;
                        }
                    }
                    // Make space in the array for the sub-child nodes
                    const UT_Array<Node>& local_nodes = parallel_nodes[counted_parallel];
                    ++counted_parallel;
                    INT_TYPE n = local_nodes.size();
                    nodes.bumpCapacity(local_nodes_start + n);
                    nodes.setSizeNoInit(local_nodes_start + n);
                    // The adjusted root of this subtree goes just before it.
                    nodes[local_nodes_start-1] = child_node;
                }
                else {
                    // Small subtree: build directly into `nodes` in place.
                    nodes.bumpCapacity(local_nodes_start + 1);
                    nodes.setSizeNoInit(local_nodes_start + 1);
                    initNode<H>(nodes, nodes[local_nodes_start], sub_boxes[i], boxes, sub_indices[i], sub_nboxes);
                }
            }
        }
        // Now, adjust and copy all sub-child nodes that were made in parallel
        adjustParallelChildNodes<PARALLEL_THRESHOLD>(nparallel, nodes, node, parallel_nodes.array(), sub_indices);
    }
    else {
        // Serial recursion for every multi-box group.
        for (INT_TYPE i = 0; i < N; ++i) {
            INT_TYPE sub_nboxes = sub_indices[i+1]-sub_indices[i];
            if (sub_nboxes != 1) {
                INT_TYPE local_nodes_start = nodes.size();
                node.child[i] = Node::markInternal(local_nodes_start);
                nodes.bumpCapacity(local_nodes_start + 1);
                nodes.setSizeNoInit(local_nodes_start + 1);
                initNode<H>(nodes, nodes[local_nodes_start], sub_boxes[i], boxes, sub_indices[i], sub_nboxes);
            }
        }
    }
}
template<uint N>
template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
inline void BVH<N>::initNodeReorder(UT_Array<Node>& nodes, Node &node, const Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, SRC_INT_TYPE* indices, INT_TYPE nboxes, const INT_TYPE indices_offset, const INT_TYPE max_items_per_leaf) noexcept {
    // Like initNode(), but groups containing at most max_items_per_leaf boxes
    // become leaf children addressed by position (indices_offset + offset into
    // the reordered `indices`), and those leaf groups are moved to the front
    // of `indices` for better cache coherence in a parallel data array.
    if (nboxes <= N) {
        // Fits in one node
        for (INT_TYPE i = 0; i < nboxes; ++i) {
            // Children reference positions, not original box indices.
            node.child[i] = indices_offset+i;
        }
        for (INT_TYPE i = nboxes; i < N; ++i) {
            node.child[i] = Node::EMPTY;
        }
        return;
    }
    // Partition into up to N contiguous groups (see initNode()).
    SRC_INT_TYPE* sub_indices[N+1];
    Box<T,NAXES> sub_boxes[N];
    if (N == 2) {
        sub_indices[0] = indices;
        sub_indices[2] = indices+nboxes;
        split<H>(axes_minmax, boxes, indices, nboxes, sub_indices[1], &sub_boxes[0]);
    }
    else {
        multiSplit<H>(axes_minmax, boxes, indices, nboxes, sub_indices, sub_boxes);
    }
    // Move any children with max_items_per_leaf or fewer indices before any children with more,
    // for better cache coherence when we're accessing data in a corresponding array.
    INT_TYPE nleaves = 0;
    // Flattened copy of all leaf groups' indices, in group order.
    UT_SmallArray<SRC_INT_TYPE> leaf_indices;
    SRC_INT_TYPE leaf_sizes[N];
    // Groups 0 and 1 are handled explicitly, then 2..N-1 in a loop.
    INT_TYPE sub_nboxes0 = sub_indices[1]-sub_indices[0];
    if (sub_nboxes0 <= max_items_per_leaf) {
        leaf_sizes[0] = sub_nboxes0;
        for (int j = 0; j < sub_nboxes0; ++j)
            leaf_indices.append(sub_indices[0][j]);
        ++nleaves;
    }
    INT_TYPE sub_nboxes1 = sub_indices[2]-sub_indices[1];
    if (sub_nboxes1 <= max_items_per_leaf) {
        leaf_sizes[nleaves] = sub_nboxes1;
        for (int j = 0; j < sub_nboxes1; ++j)
            leaf_indices.append(sub_indices[1][j]);
        ++nleaves;
    }
    for (INT_TYPE i = 2; i < N; ++i) {
        INT_TYPE sub_nboxes = sub_indices[i+1]-sub_indices[i];
        if (sub_nboxes <= max_items_per_leaf) {
            leaf_sizes[nleaves] = sub_nboxes;
            for (int j = 0; j < sub_nboxes; ++j)
                leaf_indices.append(sub_indices[i][j]);
            ++nleaves;
        }
    }
    if (nleaves > 0) {
        // NOTE: i < N condition is because INT_TYPE is unsigned.
        // i >= 0 condition is in case INT_TYPE is changed to signed.
        // First pass (backward): shift every non-leaf group's indices to the
        // right by the total size of the leaf groups that precede it, and
        // update its sub_indices boundary accordingly.
        INT_TYPE move_distance = 0;        // number of leaf groups passed so far
        INT_TYPE index_move_distance = 0;  // total indices in those leaf groups
        for (INT_TYPE i = N-1; (std::is_signed<INT_TYPE>::value ? (i >= 0) : (i < N)); --i) {
            INT_TYPE sub_nboxes = sub_indices[i+1]-sub_indices[i];
            if (sub_nboxes <= max_items_per_leaf) {
                ++move_distance;
                index_move_distance += sub_nboxes;
            }
            else if (move_distance > 0) {
                // Copy backward so overlapping ranges move safely.
                SRC_INT_TYPE *start_src_index = sub_indices[i];
                for (SRC_INT_TYPE *src_index = sub_indices[i+1]-1; src_index >= start_src_index; --src_index) {
                    src_index[index_move_distance] = src_index[0];
                }
                sub_indices[i+move_distance] = sub_indices[i]+index_move_distance;
            }
        }
        // Second pass: write the saved leaf indices into the freed-up front
        // of `indices`, and point the first nleaves groups at them.
        index_move_distance = 0;
        for (INT_TYPE i = 0; i < nleaves; ++i) {
            INT_TYPE sub_nboxes = leaf_sizes[i];
            sub_indices[i] = indices+index_move_distance;
            for (int j = 0; j < sub_nboxes; ++j)
                indices[index_move_distance+j] = leaf_indices[index_move_distance+j];
            index_move_distance += sub_nboxes;
        }
    }
    // Count the number of nodes to run in parallel and fill in single items in this node
    INT_TYPE nparallel = 0;
    static constexpr INT_TYPE PARALLEL_THRESHOLD = 1024;
    for (INT_TYPE i = 0; i < N; ++i) {
        INT_TYPE sub_nboxes = sub_indices[i+1]-sub_indices[i];
        if (sub_nboxes <= max_items_per_leaf) {
            // Leaf group: child stores the position of its first item.
            node.child[i] = indices_offset+(sub_indices[i]-sub_indices[0]);
        }
        else if (sub_nboxes >= PARALLEL_THRESHOLD) {
            ++nparallel;
        }
    }
    // NOTE: Child nodes of this node need to be placed just before the nodes in
    // their corresponding subtree, in between the subtrees, because
    // traverseParallel uses the difference between the child node IDs
    // to determine the number of nodes in the subtree.
    // Recurse
    // NOTE: The `&& false` deliberately disables the parallel branch; it is
    //       unreachable and kept (with the commented-out implementation
    //       below) for reference.
    if (nparallel >= 2 && false) {
        assert(false && "Not implemented; should never get here");
        exit(1);
        // // Do the parallel ones first, so that they can be inserted in the right place.
        // // Although the choice may seem somewhat arbitrary, we need the results to be
        // // identical whether we choose to parallelize or not, and in case we change the
        // // threshold later.
        // UT_SmallArray<UT_Array<Node>,4*sizeof(UT_Array<Node>)> parallel_nodes;
        // parallel_nodes.setSize(nparallel);
        // UT_SmallArray<Node,4*sizeof(Node)> parallel_parent_nodes;
        // parallel_parent_nodes.setSize(nparallel);
        // UTparallelFor(UT_BlockedRange<INT_TYPE>(0,nparallel), [&parallel_nodes,&parallel_parent_nodes,&sub_indices,boxes,&sub_boxes,indices_offset,max_items_per_leaf](const UT_BlockedRange<INT_TYPE>& r) {
        //     for (INT_TYPE taski = r.begin(), end = r.end(); taski < end; ++taski) {
        //         // First, find which child this is
        //         INT_TYPE counted_parallel = 0;
        //         INT_TYPE sub_nboxes;
        //         INT_TYPE childi;
        //         for (childi = 0; childi < N; ++childi) {
        //             sub_nboxes = sub_indices[childi+1]-sub_indices[childi];
        //             if (sub_nboxes >= PARALLEL_THRESHOLD) {
        //                 if (counted_parallel == taski) {
        //                     break;
        //                 }
        //                 ++counted_parallel;
        //             }
        //         }
        //         UT_ASSERT_P(counted_parallel == taski);
        //         UT_Array<Node>& local_nodes = parallel_nodes[taski];
        //         // Preallocate an overestimate of the number of nodes needed.
        //         // At worst, we could have only 2 children in every leaf, and
        //         // then above that, we have a geometric series with r=1/N and a=(sub_nboxes/2)/N
        //         // The true worst case might be a little worst than this, but
        //         // it's probably fairly unlikely.
        //         local_nodes.setCapacity(nodeEstimate(sub_nboxes));
        //         Node& parent_node = parallel_parent_nodes[taski];
        //         // We'll have to fix the internal node numbers in parent_node and local_nodes later
        //         initNodeReorder<H>(local_nodes, parent_node, sub_boxes[childi], boxes, sub_indices[childi], sub_nboxes,
        //             indices_offset+(sub_indices[childi]-sub_indices[0]), max_items_per_leaf);
        //     }
        // }, 0, 1);
        // INT_TYPE counted_parallel = 0;
        // for (INT_TYPE i = 0; i < N; ++i) {
        //     INT_TYPE sub_nboxes = sub_indices[i+1]-sub_indices[i];
        //     if (sub_nboxes > max_items_per_leaf) {
        //         INT_TYPE local_nodes_start = nodes.size();
        //         node.child[i] = Node::markInternal(local_nodes_start);
        //         if (sub_nboxes >= PARALLEL_THRESHOLD) {
        //             // First, adjust the root child node
        //             Node child_node = parallel_parent_nodes[counted_parallel];
        //             ++local_nodes_start;
        //             for (INT_TYPE childi = 0; childi < N; ++childi) {
        //                 INT_TYPE child_child = child_node.child[childi];
        //                 if (Node::isInternal(child_child) && child_child != Node::EMPTY) {
        //                     child_child += local_nodes_start;
        //                     child_node.child[childi] = child_child;
        //                 }
        //             }
        //             // Make space in the array for the sub-child nodes
        //             const UT_Array<Node>& local_nodes = parallel_nodes[counted_parallel];
        //             ++counted_parallel;
        //             INT_TYPE n = local_nodes.size();
        //             nodes.bumpCapacity(local_nodes_start + n);
        //             nodes.setSizeNoInit(local_nodes_start + n);
        //             nodes[local_nodes_start-1] = child_node;
        //         }
        //         else {
        //             nodes.bumpCapacity(local_nodes_start + 1);
        //             nodes.setSizeNoInit(local_nodes_start + 1);
        //             initNodeReorder<H>(nodes, nodes[local_nodes_start], sub_boxes[i], boxes, sub_indices[i], sub_nboxes,
        //                 indices_offset+(sub_indices[i]-sub_indices[0]), max_items_per_leaf);
        //         }
        //     }
        // }
        // // Now, adjust and copy all sub-child nodes that were made in parallel
        // adjustParallelChildNodes<PARALLEL_THRESHOLD>(nparallel, nodes, node, parallel_nodes.array(), sub_indices);
    }
    else {
        // Serial recursion for every group that is too big to be a leaf.
        for (INT_TYPE i = 0; i < N; ++i) {
            INT_TYPE sub_nboxes = sub_indices[i+1]-sub_indices[i];
            if (sub_nboxes > max_items_per_leaf) {
                INT_TYPE local_nodes_start = nodes.size();
                node.child[i] = Node::markInternal(local_nodes_start);
                nodes.bumpCapacity(local_nodes_start + 1);
                nodes.setSizeNoInit(local_nodes_start + 1);
                initNodeReorder<H>(nodes, nodes[local_nodes_start], sub_boxes[i], boxes, sub_indices[i], sub_nboxes,
                    indices_offset+(sub_indices[i]-sub_indices[0]), max_items_per_leaf);
            }
        }
    }
}
  4622. template<uint N>
  4623. template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
  4624. inline void BVH<N>::multiSplit(const Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, SRC_INT_TYPE* indices, INT_TYPE nboxes, SRC_INT_TYPE* sub_indices[N+1], Box<T,NAXES> sub_boxes[N]) noexcept {
  4625. sub_indices[0] = indices;
  4626. sub_indices[2] = indices+nboxes;
  4627. split<H>(axes_minmax, boxes, indices, nboxes, sub_indices[1], &sub_boxes[0]);
  4628. if (N == 2) {
  4629. return;
  4630. }
  4631. if (H == BVH_Heuristic::MEDIAN_MAX_AXIS) {
  4632. SRC_INT_TYPE* sub_indices_startend[2*N];
  4633. Box<T,NAXES> sub_boxes_unsorted[N];
  4634. sub_boxes_unsorted[0] = sub_boxes[0];
  4635. sub_boxes_unsorted[1] = sub_boxes[1];
  4636. sub_indices_startend[0] = sub_indices[0];
  4637. sub_indices_startend[1] = sub_indices[1];
  4638. sub_indices_startend[2] = sub_indices[1];
  4639. sub_indices_startend[3] = sub_indices[2];
  4640. for (INT_TYPE nsub = 2; nsub < N; ++nsub) {
  4641. SRC_INT_TYPE* selected_start = sub_indices_startend[0];
  4642. SRC_INT_TYPE* selected_end = sub_indices_startend[1];
  4643. Box<T,NAXES> sub_box = sub_boxes_unsorted[0];
  4644. // Shift results back.
  4645. for (INT_TYPE i = 0; i < nsub-1; ++i) {
  4646. sub_indices_startend[2*i ] = sub_indices_startend[2*i+2];
  4647. sub_indices_startend[2*i+1] = sub_indices_startend[2*i+3];
  4648. }
  4649. for (INT_TYPE i = 0; i < nsub-1; ++i) {
  4650. sub_boxes_unsorted[i] = sub_boxes_unsorted[i-1];
  4651. }
  4652. // Do the split
  4653. split<H>(sub_box, boxes, selected_start, selected_end-selected_start, sub_indices_startend[2*nsub-1], &sub_boxes_unsorted[nsub]);
  4654. sub_indices_startend[2*nsub-2] = selected_start;
  4655. sub_indices_startend[2*nsub] = sub_indices_startend[2*nsub-1];
  4656. sub_indices_startend[2*nsub+1] = selected_end;
  4657. // Sort pointers so that they're in the correct order
  4658. sub_indices[N] = indices+nboxes;
  4659. for (INT_TYPE i = 0; i < N; ++i) {
  4660. SRC_INT_TYPE* prev_pointer = (i != 0) ? sub_indices[i-1] : nullptr;
  4661. SRC_INT_TYPE* min_pointer = nullptr;
  4662. Box<T,NAXES> box;
  4663. for (INT_TYPE j = 0; j < N; ++j) {
  4664. SRC_INT_TYPE* cur_pointer = sub_indices_startend[2*j];
  4665. if ((cur_pointer > prev_pointer) && (!min_pointer || (cur_pointer < min_pointer))) {
  4666. min_pointer = cur_pointer;
  4667. box = sub_boxes_unsorted[j];
  4668. }
  4669. }
  4670. UT_ASSERT_P(min_pointer);
  4671. sub_indices[i] = min_pointer;
  4672. sub_boxes[i] = box;
  4673. }
  4674. }
  4675. }
  4676. else {
  4677. T sub_box_areas[N];
  4678. sub_box_areas[0] = unweightedHeuristic<H>(sub_boxes[0]);
  4679. sub_box_areas[1] = unweightedHeuristic<H>(sub_boxes[1]);
  4680. for (INT_TYPE nsub = 2; nsub < N; ++nsub) {
  4681. // Choose which one to split
  4682. INT_TYPE split_choice = INT_TYPE(-1);
  4683. T max_heuristic;
  4684. for (INT_TYPE i = 0; i < nsub; ++i) {
  4685. const INT_TYPE index_count = (sub_indices[i+1]-sub_indices[i]);
  4686. if (index_count > 1) {
  4687. const T heuristic = sub_box_areas[i]*index_count;
  4688. if (split_choice == INT_TYPE(-1) || heuristic > max_heuristic) {
  4689. split_choice = i;
  4690. max_heuristic = heuristic;
  4691. }
  4692. }
  4693. }
  4694. UT_ASSERT_MSG_P(split_choice != INT_TYPE(-1), "There should always be at least one that can be split!");
  4695. SRC_INT_TYPE* selected_start = sub_indices[split_choice];
  4696. SRC_INT_TYPE* selected_end = sub_indices[split_choice+1];
  4697. // Shift results over; we can skip the one we selected.
  4698. for (INT_TYPE i = nsub; i > split_choice; --i) {
  4699. sub_indices[i+1] = sub_indices[i];
  4700. }
  4701. for (INT_TYPE i = nsub-1; i > split_choice; --i) {
  4702. sub_boxes[i+1] = sub_boxes[i];
  4703. }
  4704. for (INT_TYPE i = nsub-1; i > split_choice; --i) {
  4705. sub_box_areas[i+1] = sub_box_areas[i];
  4706. }
  4707. // Do the split
  4708. split<H>(sub_boxes[split_choice], boxes, selected_start, selected_end-selected_start, sub_indices[split_choice+1], &sub_boxes[split_choice]);
  4709. sub_box_areas[split_choice] = unweightedHeuristic<H>(sub_boxes[split_choice]);
  4710. sub_box_areas[split_choice+1] = unweightedHeuristic<H>(sub_boxes[split_choice+1]);
  4711. }
  4712. }
  4713. }
  4714. template<uint N>
  4715. template<BVH_Heuristic H,typename T,uint NAXES,typename BOX_TYPE,typename SRC_INT_TYPE>
  4716. inline void BVH<N>::split(const Box<T,NAXES>& axes_minmax, const BOX_TYPE* boxes, SRC_INT_TYPE* indices, INT_TYPE nboxes, SRC_INT_TYPE*& split_indices, Box<T,NAXES>* split_boxes) noexcept {
  4717. if (nboxes == 2) {
  4718. split_boxes[0].initBounds(boxes[indices[0]]);
  4719. split_boxes[1].initBounds(boxes[indices[1]]);
  4720. split_indices = indices+1;
  4721. return;
  4722. }
  4723. UT_ASSERT_MSG_P(nboxes > 2, "Cases with less than 3 boxes should have already been handled!");
  4724. if (H == BVH_Heuristic::MEDIAN_MAX_AXIS) {
  4725. UT_ASSERT_MSG(0, "FIXME: Implement this!!!");
  4726. }
  4727. constexpr INT_TYPE SMALL_LIMIT = 6;
  4728. if (nboxes <= SMALL_LIMIT) {
  4729. // Special case for a small number of boxes: check all (2^(n-1))-1 partitions.
  4730. // Without loss of generality, we assume that box 0 is in partition 0,
  4731. // and that not all boxes are in partition 0.
  4732. Box<T,NAXES> local_boxes[SMALL_LIMIT];
  4733. for (INT_TYPE box = 0; box < nboxes; ++box) {
  4734. local_boxes[box].initBounds(boxes[indices[box]]);
  4735. //printf("Box %u: (%f-%f)x(%f-%f)x(%f-%f)\n", uint(box), local_boxes[box].vals[0][0], local_boxes[box].vals[0][1], local_boxes[box].vals[1][0], local_boxes[box].vals[1][1], local_boxes[box].vals[2][0], local_boxes[box].vals[2][1]);
  4736. }
  4737. const INT_TYPE partition_limit = (INT_TYPE(1)<<(nboxes-1));
  4738. INT_TYPE best_partition = INT_TYPE(-1);
  4739. T best_heuristic;
  4740. for (INT_TYPE partition_bits = 1; partition_bits < partition_limit; ++partition_bits) {
  4741. Box<T,NAXES> sub_boxes[2];
  4742. sub_boxes[0] = local_boxes[0];
  4743. sub_boxes[1].initBounds();
  4744. INT_TYPE sub_counts[2] = {1,0};
  4745. for (INT_TYPE bit = 0; bit < nboxes-1; ++bit) {
  4746. INT_TYPE dest = (partition_bits>>bit)&1;
  4747. sub_boxes[dest].combine(local_boxes[bit+1]);
  4748. ++sub_counts[dest];
  4749. }
  4750. //printf("Partition bits %u: sub_box[0]: (%f-%f)x(%f-%f)x(%f-%f)\n", uint(partition_bits), sub_boxes[0].vals[0][0], sub_boxes[0].vals[0][1], sub_boxes[0].vals[1][0], sub_boxes[0].vals[1][1], sub_boxes[0].vals[2][0], sub_boxes[0].vals[2][1]);
  4751. //printf("Partition bits %u: sub_box[1]: (%f-%f)x(%f-%f)x(%f-%f)\n", uint(partition_bits), sub_boxes[1].vals[0][0], sub_boxes[1].vals[0][1], sub_boxes[1].vals[1][0], sub_boxes[1].vals[1][1], sub_boxes[1].vals[2][0], sub_boxes[1].vals[2][1]);
  4752. const T heuristic =
  4753. unweightedHeuristic<H>(sub_boxes[0])*sub_counts[0] +
  4754. unweightedHeuristic<H>(sub_boxes[1])*sub_counts[1];
  4755. //printf("Partition bits %u: heuristic = %f (= %f*%u + %f*%u)\n",uint(partition_bits),heuristic, unweightedHeuristic<H>(sub_boxes[0]), uint(sub_counts[0]), unweightedHeuristic<H>(sub_boxes[1]), uint(sub_counts[1]));
  4756. if (best_partition == INT_TYPE(-1) || heuristic < best_heuristic) {
  4757. //printf(" New best\n");
  4758. best_partition = partition_bits;
  4759. best_heuristic = heuristic;
  4760. split_boxes[0] = sub_boxes[0];
  4761. split_boxes[1] = sub_boxes[1];
  4762. }
  4763. }
  4764. #if 0 // This isn't actually necessary with the current design, because I changed how the number of subtree nodes is determined.
  4765. // If best_partition is partition_limit-1, there's only 1 box
  4766. // in partition 0. We should instead put this in partition 1,
  4767. // so that we can help always have the internal node indices first
  4768. // in each node. That gets used to (fairly) quickly determine
  4769. // the number of nodes in a sub-tree.
  4770. if (best_partition == partition_limit - 1) {
  4771. // Put the first index last.
  4772. SRC_INT_TYPE last_index = indices[0];
  4773. SRC_INT_TYPE* dest_indices = indices;
  4774. SRC_INT_TYPE* local_split_indices = indices + nboxes-1;
  4775. for (; dest_indices != local_split_indices; ++dest_indices) {
  4776. dest_indices[0] = dest_indices[1];
  4777. }
  4778. *local_split_indices = last_index;
  4779. split_indices = local_split_indices;
  4780. // Swap the boxes
  4781. const Box<T,NAXES> temp_box = sub_boxes[0];
  4782. sub_boxes[0] = sub_boxes[1];
  4783. sub_boxes[1] = temp_box;
  4784. return;
  4785. }
  4786. #endif
  4787. // Reorder the indices.
  4788. // NOTE: Index 0 is always in partition 0, so can stay put.
  4789. SRC_INT_TYPE local_indices[SMALL_LIMIT-1];
  4790. for (INT_TYPE box = 0; box < nboxes-1; ++box) {
  4791. local_indices[box] = indices[box+1];
  4792. }
  4793. SRC_INT_TYPE* dest_indices = indices+1;
  4794. SRC_INT_TYPE* src_indices = local_indices;
  4795. // Copy partition 0
  4796. for (INT_TYPE bit = 0; bit < nboxes-1; ++bit, ++src_indices) {
  4797. if (!((best_partition>>bit)&1)) {
  4798. //printf("Copying %u into partition 0\n",uint(*src_indices));
  4799. *dest_indices = *src_indices;
  4800. ++dest_indices;
  4801. }
  4802. }
  4803. split_indices = dest_indices;
  4804. // Copy partition 1
  4805. src_indices = local_indices;
  4806. for (INT_TYPE bit = 0; bit < nboxes-1; ++bit, ++src_indices) {
  4807. if ((best_partition>>bit)&1) {
  4808. //printf("Copying %u into partition 1\n",uint(*src_indices));
  4809. *dest_indices = *src_indices;
  4810. ++dest_indices;
  4811. }
  4812. }
  4813. return;
  4814. }
  4815. uint max_axis = 0;
  4816. T max_axis_length = axes_minmax.vals[0][1] - axes_minmax.vals[0][0];
  4817. for (uint axis = 1; axis < NAXES; ++axis) {
  4818. const T axis_length = axes_minmax.vals[axis][1] - axes_minmax.vals[axis][0];
  4819. if (axis_length > max_axis_length) {
  4820. max_axis = axis;
  4821. max_axis_length = axis_length;
  4822. }
  4823. }
  4824. if (!(max_axis_length > T(0))) {
  4825. // All boxes are a single point or NaN.
  4826. // Pick an arbitrary split point.
  4827. split_indices = indices + nboxes/2;
  4828. split_boxes[0] = axes_minmax;
  4829. split_boxes[1] = axes_minmax;
  4830. return;
  4831. }
  4832. const INT_TYPE axis = max_axis;
  4833. constexpr INT_TYPE MID_LIMIT = 2*NSPANS;
  4834. if (nboxes <= MID_LIMIT) {
  4835. // Sort along axis, and try all possible splits.
  4836. #if 1
  4837. // First, compute midpoints
  4838. T midpointsx2[MID_LIMIT];
  4839. for (INT_TYPE i = 0; i < nboxes; ++i) {
  4840. midpointsx2[i] = utBoxCenter(boxes[indices[i]], axis);
  4841. }
  4842. SRC_INT_TYPE local_indices[MID_LIMIT];
  4843. for (INT_TYPE i = 0; i < nboxes; ++i) {
  4844. local_indices[i] = i;
  4845. }
  4846. const INT_TYPE chunk_starts[5] = {0, nboxes/4, nboxes/2, INT_TYPE((3*uint64(nboxes))/4), nboxes};
  4847. // For sorting, insertion sort 4 chunks and merge them
  4848. for (INT_TYPE chunk = 0; chunk < 4; ++chunk) {
  4849. const INT_TYPE start = chunk_starts[chunk];
  4850. const INT_TYPE end = chunk_starts[chunk+1];
  4851. for (INT_TYPE i = start+1; i < end; ++i) {
  4852. SRC_INT_TYPE indexi = local_indices[i];
  4853. T vi = midpointsx2[indexi];
  4854. for (INT_TYPE j = start; j < i; ++j) {
  4855. SRC_INT_TYPE indexj = local_indices[j];
  4856. T vj = midpointsx2[indexj];
  4857. if (vi < vj) {
  4858. do {
  4859. local_indices[j] = indexi;
  4860. indexi = indexj;
  4861. ++j;
  4862. if (j == i) {
  4863. local_indices[j] = indexi;
  4864. break;
  4865. }
  4866. indexj = local_indices[j];
  4867. } while (true);
  4868. break;
  4869. }
  4870. }
  4871. }
  4872. }
  4873. // Merge chunks into another buffer
  4874. SRC_INT_TYPE local_indices_temp[MID_LIMIT];
  4875. std::merge(local_indices, local_indices+chunk_starts[1],
  4876. local_indices+chunk_starts[1], local_indices+chunk_starts[2],
  4877. local_indices_temp, [&midpointsx2](const SRC_INT_TYPE a, const SRC_INT_TYPE b)->bool {
  4878. return midpointsx2[a] < midpointsx2[b];
  4879. });
  4880. std::merge(local_indices+chunk_starts[2], local_indices+chunk_starts[3],
  4881. local_indices+chunk_starts[3], local_indices+chunk_starts[4],
  4882. local_indices_temp+chunk_starts[2], [&midpointsx2](const SRC_INT_TYPE a, const SRC_INT_TYPE b)->bool {
  4883. return midpointsx2[a] < midpointsx2[b];
  4884. });
  4885. std::merge(local_indices_temp, local_indices_temp+chunk_starts[2],
  4886. local_indices_temp+chunk_starts[2], local_indices_temp+chunk_starts[4],
  4887. local_indices, [&midpointsx2](const SRC_INT_TYPE a, const SRC_INT_TYPE b)->bool {
  4888. return midpointsx2[a] < midpointsx2[b];
  4889. });
  4890. // Translate local_indices into indices
  4891. for (INT_TYPE i = 0; i < nboxes; ++i) {
  4892. local_indices[i] = indices[local_indices[i]];
  4893. }
  4894. // Copy back
  4895. for (INT_TYPE i = 0; i < nboxes; ++i) {
  4896. indices[i] = local_indices[i];
  4897. }
  4898. #else
  4899. std::stable_sort(indices, indices+nboxes, [boxes,max_axis](SRC_INT_TYPE a, SRC_INT_TYPE b)->bool {
  4900. return utBoxCenter(boxes[a], max_axis) < utBoxCenter(boxes[b], max_axis);
  4901. });
  4902. #endif
  4903. // Accumulate boxes
  4904. Box<T,NAXES> left_boxes[MID_LIMIT-1];
  4905. Box<T,NAXES> right_boxes[MID_LIMIT-1];
  4906. const INT_TYPE nsplits = nboxes-1;
  4907. Box<T,NAXES> box_accumulator(boxes[local_indices[0]]);
  4908. left_boxes[0] = box_accumulator;
  4909. for (INT_TYPE i = 1; i < nsplits; ++i) {
  4910. box_accumulator.combine(boxes[local_indices[i]]);
  4911. left_boxes[i] = box_accumulator;
  4912. }
  4913. box_accumulator.initBounds(boxes[local_indices[nsplits-1]]);
  4914. right_boxes[nsplits-1] = box_accumulator;
  4915. for (INT_TYPE i = nsplits-1; i > 0; --i) {
  4916. box_accumulator.combine(boxes[local_indices[i]]);
  4917. right_boxes[i-1] = box_accumulator;
  4918. }
  4919. INT_TYPE best_split = 0;
  4920. T best_local_heuristic =
  4921. unweightedHeuristic<H>(left_boxes[0]) +
  4922. unweightedHeuristic<H>(right_boxes[0])*(nboxes-1);
  4923. for (INT_TYPE split = 1; split < nsplits; ++split) {
  4924. const T heuristic =
  4925. unweightedHeuristic<H>(left_boxes[split])*(split+1) +
  4926. unweightedHeuristic<H>(right_boxes[split])*(nboxes-(split+1));
  4927. if (heuristic < best_local_heuristic) {
  4928. best_split = split;
  4929. best_local_heuristic = heuristic;
  4930. }
  4931. }
  4932. split_indices = indices+best_split+1;
  4933. split_boxes[0] = left_boxes[best_split];
  4934. split_boxes[1] = right_boxes[best_split];
  4935. return;
  4936. }
  4937. const T axis_min = axes_minmax.vals[max_axis][0];
  4938. const T axis_length = max_axis_length;
  4939. Box<T,NAXES> span_boxes[NSPANS];
  4940. for (INT_TYPE i = 0; i < NSPANS; ++i) {
  4941. span_boxes[i].initBounds();
  4942. }
  4943. INT_TYPE span_counts[NSPANS];
  4944. for (INT_TYPE i = 0; i < NSPANS; ++i) {
  4945. span_counts[i] = 0;
  4946. }
  4947. const T axis_min_x2 = ut_BoxCentre<BOX_TYPE>::scale*axis_min;
  4948. // NOTE: Factor of 0.5 is factored out of the average when using the average value to determine the span that a box lies in.
  4949. const T axis_index_scale = (T(1.0/ut_BoxCentre<BOX_TYPE>::scale)*NSPANS)/axis_length;
  4950. constexpr INT_TYPE BOX_SPANS_PARALLEL_THRESHOLD = 2048;
  4951. INT_TYPE ntasks = 1;
  4952. if (nboxes >= BOX_SPANS_PARALLEL_THRESHOLD) {
  4953. INT_TYPE nprocessors = UT_Thread::getNumProcessors();
  4954. ntasks = (nprocessors > 1) ? SYSmin(4*nprocessors, nboxes/(BOX_SPANS_PARALLEL_THRESHOLD/2)) : 1;
  4955. }
  4956. if (ntasks == 1) {
  4957. for (INT_TYPE indexi = 0; indexi < nboxes; ++indexi) {
  4958. const auto& box = boxes[indices[indexi]];
  4959. const T sum = utBoxCenter(box, axis);
  4960. const uint span_index = SYSclamp(int((sum-axis_min_x2)*axis_index_scale), int(0), int(NSPANS-1));
  4961. ++span_counts[span_index];
  4962. Box<T,NAXES>& span_box = span_boxes[span_index];
  4963. span_box.combine(box);
  4964. }
  4965. }
  4966. else {
  4967. UT_SmallArray<Box<T,NAXES>> parallel_boxes;
  4968. UT_SmallArray<INT_TYPE> parallel_counts;
  4969. igl::parallel_for(
  4970. nboxes,
  4971. [&parallel_boxes,&parallel_counts](int n)
  4972. {
  4973. parallel_boxes.setSize( NSPANS*n);
  4974. parallel_counts.setSize(NSPANS*n);
  4975. for(int t = 0;t<n;t++)
  4976. {
  4977. for (INT_TYPE i = 0; i < NSPANS; ++i)
  4978. {
  4979. parallel_boxes[t*NSPANS+i].initBounds();
  4980. parallel_counts[t*NSPANS+i] = 0;
  4981. }
  4982. }
  4983. },
  4984. [&parallel_boxes,&parallel_counts,&boxes,indices,axis,axis_min_x2,axis_index_scale](int j, int t)
  4985. {
  4986. const auto& box = boxes[indices[j]];
  4987. const T sum = utBoxCenter(box, axis);
  4988. const uint span_index = SYSclamp(int((sum-axis_min_x2)*axis_index_scale), int(0), int(NSPANS-1));
  4989. ++parallel_counts[t*NSPANS+span_index];
  4990. Box<T,NAXES>& span_box = parallel_boxes[t*NSPANS+span_index];
  4991. span_box.combine(box);
  4992. },
  4993. [&parallel_boxes,&parallel_counts,&span_boxes,&span_counts](int t)
  4994. {
  4995. for(int i = 0;i<NSPANS;i++)
  4996. {
  4997. span_counts[i] += parallel_counts[t*NSPANS + i];
  4998. span_boxes[i].combine(parallel_boxes[t*NSPANS + i]);
  4999. }
  5000. });
  5001. }
  5002. // Spans 0 to NSPANS-2
  5003. Box<T,NAXES> left_boxes[NSPLITS];
  5004. // Spans 1 to NSPANS-1
  5005. Box<T,NAXES> right_boxes[NSPLITS];
  5006. // Accumulate boxes
  5007. Box<T,NAXES> box_accumulator = span_boxes[0];
  5008. left_boxes[0] = box_accumulator;
  5009. for (INT_TYPE i = 1; i < NSPLITS; ++i) {
  5010. box_accumulator.combine(span_boxes[i]);
  5011. left_boxes[i] = box_accumulator;
  5012. }
  5013. box_accumulator = span_boxes[NSPANS-1];
  5014. right_boxes[NSPLITS-1] = box_accumulator;
  5015. for (INT_TYPE i = NSPLITS-1; i > 0; --i) {
  5016. box_accumulator.combine(span_boxes[i]);
  5017. right_boxes[i-1] = box_accumulator;
  5018. }
  5019. INT_TYPE left_counts[NSPLITS];
  5020. // Accumulate counts
  5021. INT_TYPE count_accumulator = span_counts[0];
  5022. left_counts[0] = count_accumulator;
  5023. for (INT_TYPE spliti = 1; spliti < NSPLITS; ++spliti) {
  5024. count_accumulator += span_counts[spliti];
  5025. left_counts[spliti] = count_accumulator;
  5026. }
  5027. // Check which split is optimal, making sure that at least 1/MIN_FRACTION of all boxes are on each side.
  5028. const INT_TYPE min_count = nboxes/MIN_FRACTION;
  5029. UT_ASSERT_MSG_P(min_count > 0, "MID_LIMIT above should have been large enough that nboxes would be > MIN_FRACTION");
  5030. const INT_TYPE max_count = ((MIN_FRACTION-1)*uint64(nboxes))/MIN_FRACTION;
  5031. UT_ASSERT_MSG_P(max_count < nboxes, "I'm not sure how this could happen mathematically, but it needs to be checked.");
  5032. T smallest_heuristic = std::numeric_limits<T>::infinity();
  5033. INT_TYPE split_index = -1;
  5034. for (INT_TYPE spliti = 0; spliti < NSPLITS; ++spliti) {
  5035. const INT_TYPE left_count = left_counts[spliti];
  5036. if (left_count < min_count || left_count > max_count) {
  5037. continue;
  5038. }
  5039. const INT_TYPE right_count = nboxes-left_count;
  5040. const T heuristic =
  5041. left_count*unweightedHeuristic<H>(left_boxes[spliti]) +
  5042. right_count*unweightedHeuristic<H>(right_boxes[spliti]);
  5043. if (heuristic < smallest_heuristic) {
  5044. smallest_heuristic = heuristic;
  5045. split_index = spliti;
  5046. }
  5047. }
  5048. SRC_INT_TYPE*const indices_end = indices+nboxes;
  5049. if (split_index == -1) {
  5050. // No split was anywhere close to balanced, so we fall back to searching for one.
  5051. // First, find the span containing the "balance" point, namely where left_counts goes from
  5052. // being less than min_count to more than max_count.
  5053. // If that's span 0, use max_count as the ordered index to select,
  5054. // if it's span NSPANS-1, use min_count as the ordered index to select,
  5055. // else use nboxes/2 as the ordered index to select.
  5056. //T min_pivotx2 = -std::numeric_limits<T>::infinity();
  5057. //T max_pivotx2 = std::numeric_limits<T>::infinity();
  5058. SRC_INT_TYPE* nth_index;
  5059. if (left_counts[0] > max_count) {
  5060. // Search for max_count ordered index
  5061. nth_index = indices+max_count;
  5062. //max_pivotx2 = max_axis_min_x2 + max_axis_length/(NSPANS/ut_BoxCentre<BOX_TYPE>::scale);
  5063. }
  5064. else if (left_counts[NSPLITS-1] < min_count) {
  5065. // Search for min_count ordered index
  5066. nth_index = indices+min_count;
  5067. //min_pivotx2 = max_axis_min_x2 + max_axis_length - max_axis_length/(NSPANS/ut_BoxCentre<BOX_TYPE>::scale);
  5068. }
  5069. else {
  5070. // Search for nboxes/2 ordered index
  5071. nth_index = indices+nboxes/2;
  5072. //for (INT_TYPE spliti = 1; spliti < NSPLITS; ++spliti) {
  5073. // // The second condition should be redundant, but is just in case.
  5074. // if (left_counts[spliti] > max_count || spliti == NSPLITS-1) {
  5075. // min_pivotx2 = max_axis_min_x2 + spliti*max_axis_length/(NSPANS/ut_BoxCentre<BOX_TYPE>::scale);
  5076. // max_pivotx2 = max_axis_min_x2 + (spliti+1)*max_axis_length/(NSPANS/ut_BoxCentre<BOX_TYPE>::scale);
  5077. // break;
  5078. // }
  5079. //}
  5080. }
  5081. nthElement<T>(boxes,indices,indices+nboxes,max_axis,nth_index);//,min_pivotx2,max_pivotx2);
  5082. split_indices = nth_index;
  5083. Box<T,NAXES> left_box(boxes[indices[0]]);
  5084. for (SRC_INT_TYPE* left_indices = indices+1; left_indices < nth_index; ++left_indices) {
  5085. left_box.combine(boxes[*left_indices]);
  5086. }
  5087. Box<T,NAXES> right_box(boxes[nth_index[0]]);
  5088. for (SRC_INT_TYPE* right_indices = nth_index+1; right_indices < indices_end; ++right_indices) {
  5089. right_box.combine(boxes[*right_indices]);
  5090. }
  5091. split_boxes[0] = left_box;
  5092. split_boxes[1] = right_box;
  5093. }
  5094. else {
  5095. const T pivotx2 = axis_min_x2 + (split_index+1)*axis_length/(NSPANS/ut_BoxCentre<BOX_TYPE>::scale);
  5096. SRC_INT_TYPE* ppivot_start;
  5097. SRC_INT_TYPE* ppivot_end;
  5098. partitionByCentre(boxes,indices,indices+nboxes,max_axis,pivotx2,ppivot_start,ppivot_end);
  5099. split_indices = indices + left_counts[split_index];
  5100. // Ignoring roundoff error, we would have
  5101. // split_indices >= ppivot_start && split_indices <= ppivot_end,
  5102. // but it may not always be in practice.
  5103. if (split_indices >= ppivot_start && split_indices <= ppivot_end) {
  5104. split_boxes[0] = left_boxes[split_index];
  5105. split_boxes[1] = right_boxes[split_index];
  5106. return;
  5107. }
  5108. // Roundoff error changed the split, so we need to recompute the boxes.
  5109. if (split_indices < ppivot_start) {
  5110. split_indices = ppivot_start;
  5111. }
  5112. else {//(split_indices > ppivot_end)
  5113. split_indices = ppivot_end;
  5114. }
  5115. // Emergency checks, just in case
  5116. if (split_indices == indices) {
  5117. ++split_indices;
  5118. }
  5119. else if (split_indices == indices_end) {
  5120. --split_indices;
  5121. }
  5122. Box<T,NAXES> left_box(boxes[indices[0]]);
  5123. for (SRC_INT_TYPE* left_indices = indices+1; left_indices < split_indices; ++left_indices) {
  5124. left_box.combine(boxes[*left_indices]);
  5125. }
  5126. Box<T,NAXES> right_box(boxes[split_indices[0]]);
  5127. for (SRC_INT_TYPE* right_indices = split_indices+1; right_indices < indices_end; ++right_indices) {
  5128. right_box.combine(boxes[*right_indices]);
  5129. }
  5130. split_boxes[0] = left_box;
  5131. split_boxes[1] = right_box;
  5132. }
  5133. }
template<uint N>
template<uint PARALLEL_THRESHOLD, typename SRC_INT_TYPE>
inline void BVH<N>::adjustParallelChildNodes(INT_TYPE nparallel, UT_Array<Node>& nodes, Node& node, UT_Array<Node>* parallel_nodes, SRC_INT_TYPE* sub_indices) noexcept
{
    // After `nparallel` large children of `node` were built in parallel, each
    // into its own task-local array in `parallel_nodes`, copy every local
    // array into the main `nodes` array and rebase each copied node's
    // internal child indices (which are local to the task's array) onto the
    // final positions in `nodes`.
    //
    // A child was built in parallel iff its box count
    // (sub_indices[childi+1]-sub_indices[childi]) is >= PARALLEL_THRESHOLD,
    // and tasks are numbered in increasing childi order, so a single forward
    // scan tracked by `counted_parallel`/`childi` matches task to child.
    // Alec: No need to parallelize this...
    //UTparallelFor(UT_BlockedRange<INT_TYPE>(0,nparallel), [&node,&nodes,&parallel_nodes,&sub_indices](const UT_BlockedRange<INT_TYPE>& r) {
    INT_TYPE counted_parallel = 0;
    INT_TYPE childi = 0;
    for(int taski = 0;taski < nparallel; taski++)
    {
        //for (INT_TYPE taski = r.begin(), end = r.end(); taski < end; ++taski) {
        // First, find which child this is
        INT_TYPE sub_nboxes;
        for (; childi < N; ++childi) {
            sub_nboxes = sub_indices[childi+1]-sub_indices[childi];
            if (sub_nboxes >= PARALLEL_THRESHOLD) {
                // Stop once we've reached the taski-th parallel-built child.
                if (counted_parallel == taski) {
                    break;
                }
                ++counted_parallel;
            }
        }
        UT_ASSERT_P(counted_parallel == taski);
        const UT_Array<Node>& local_nodes = parallel_nodes[counted_parallel];
        INT_TYPE n = local_nodes.size();
        // node.child[childi] encodes where this subtree's root landed in
        // `nodes`; the task's nodes are copied starting just past it.
        INT_TYPE local_nodes_start = Node::getInternalNum(node.child[childi])+1;
        ++counted_parallel;
        ++childi;
        for (INT_TYPE j = 0; j < n; ++j) {
            Node local_node = local_nodes[j];
            // Rebase internal (non-empty) child references from task-local
            // numbering to their final indices in `nodes`.
            for (INT_TYPE childj = 0; childj < N; ++childj) {
                INT_TYPE local_child = local_node.child[childj];
                if (Node::isInternal(local_child) && local_child != Node::EMPTY) {
                    local_child += local_nodes_start;
                    local_node.child[childj] = local_child;
                }
            }
            nodes[local_nodes_start+j] = local_node;
        }
    }
}
  5175. template<uint N>
  5176. template<typename T,typename BOX_TYPE,typename SRC_INT_TYPE>
  5177. void BVH<N>::nthElement(const BOX_TYPE* boxes, SRC_INT_TYPE* indices, const SRC_INT_TYPE* indices_end, const uint axis, SRC_INT_TYPE*const nth) noexcept {//, const T min_pivotx2, const T max_pivotx2) noexcept {
  5178. while (true) {
  5179. // Choose median of first, middle, and last as the pivot
  5180. T pivots[3] = {
  5181. utBoxCenter(boxes[indices[0]], axis),
  5182. utBoxCenter(boxes[indices[(indices_end-indices)/2]], axis),
  5183. utBoxCenter(boxes[*(indices_end-1)], axis)
  5184. };
  5185. if (pivots[0] < pivots[1]) {
  5186. const T temp = pivots[0];
  5187. pivots[0] = pivots[1];
  5188. pivots[1] = temp;
  5189. }
  5190. if (pivots[0] < pivots[2]) {
  5191. const T temp = pivots[0];
  5192. pivots[0] = pivots[2];
  5193. pivots[2] = temp;
  5194. }
  5195. if (pivots[1] < pivots[2]) {
  5196. const T temp = pivots[1];
  5197. pivots[1] = pivots[2];
  5198. pivots[2] = temp;
  5199. }
  5200. T mid_pivotx2 = pivots[1];
  5201. #if 0
  5202. // We limit the pivot, because we know that the true value is between min and max
  5203. if (mid_pivotx2 < min_pivotx2) {
  5204. mid_pivotx2 = min_pivotx2;
  5205. }
  5206. else if (mid_pivotx2 > max_pivotx2) {
  5207. mid_pivotx2 = max_pivotx2;
  5208. }
  5209. #endif
  5210. SRC_INT_TYPE* pivot_start;
  5211. SRC_INT_TYPE* pivot_end;
  5212. partitionByCentre(boxes,indices,indices_end,axis,mid_pivotx2,pivot_start,pivot_end);
  5213. if (nth < pivot_start) {
  5214. indices_end = pivot_start;
  5215. }
  5216. else if (nth < pivot_end) {
  5217. // nth is in the middle of the pivot range,
  5218. // which is in the right place, so we're done.
  5219. return;
  5220. }
  5221. else {
  5222. indices = pivot_end;
  5223. }
  5224. if (indices_end <= indices+1) {
  5225. return;
  5226. }
  5227. }
  5228. }
  5229. template<uint N>
  5230. template<typename T,typename BOX_TYPE,typename SRC_INT_TYPE>
  5231. void BVH<N>::partitionByCentre(const BOX_TYPE* boxes, SRC_INT_TYPE*const indices, const SRC_INT_TYPE*const indices_end, const uint axis, const T pivotx2, SRC_INT_TYPE*& ppivot_start, SRC_INT_TYPE*& ppivot_end) noexcept {
  5232. // TODO: Consider parallelizing this!
  5233. // First element >= pivot
  5234. SRC_INT_TYPE* pivot_start = indices;
  5235. // First element > pivot
  5236. SRC_INT_TYPE* pivot_end = indices;
  5237. // Loop through forward once
  5238. for (SRC_INT_TYPE* psrc_index = indices; psrc_index != indices_end; ++psrc_index) {
  5239. const T srcsum = utBoxCenter(boxes[*psrc_index], axis);
  5240. if (srcsum < pivotx2) {
  5241. if (psrc_index != pivot_start) {
  5242. if (pivot_start == pivot_end) {
  5243. // Common case: nothing equal to the pivot
  5244. const SRC_INT_TYPE temp = *psrc_index;
  5245. *psrc_index = *pivot_start;
  5246. *pivot_start = temp;
  5247. }
  5248. else {
  5249. // Less common case: at least one thing equal to the pivot
  5250. const SRC_INT_TYPE temp = *psrc_index;
  5251. *psrc_index = *pivot_end;
  5252. *pivot_end = *pivot_start;
  5253. *pivot_start = temp;
  5254. }
  5255. }
  5256. ++pivot_start;
  5257. ++pivot_end;
  5258. }
  5259. else if (srcsum == pivotx2) {
  5260. // Add to the pivot area
  5261. if (psrc_index != pivot_end) {
  5262. const SRC_INT_TYPE temp = *psrc_index;
  5263. *psrc_index = *pivot_end;
  5264. *pivot_end = temp;
  5265. }
  5266. ++pivot_end;
  5267. }
  5268. }
  5269. ppivot_start = pivot_start;
  5270. ppivot_end = pivot_end;
  5271. }
#if 0
// Debugging aid (currently compiled out): prints the BVH topology as an
// indented tree of "Node"/"Tri" lines, using an explicit stack of
// (node index, next child index) pairs instead of recursion.
template<uint N>
void BVH<N>::debugDump() const {
    printf("\nNode 0: {\n");
    UT_WorkBuffer indent;
    indent.append(80, ' ');
    UT_Array<INT_TYPE> stack;
    stack.append(0);
    stack.append(0);
    while (!stack.isEmpty()) {
        int depth = stack.size()/2;
        // Grow the space buffer on demand so the tail always covers 4*depth.
        if (indent.length() < 4*depth) {
            indent.append(4, ' ');
        }
        INT_TYPE cur_nodei = stack[stack.size()-2];
        INT_TYPE cur_i = stack[stack.size()-1];
        if (cur_i == N) {
            // All children of this node visited: close its brace and pop.
            // NOTE(review): the indent buffer is passed directly as the
            // printf format string; it contains only spaces so this is safe,
            // but printf("%s", ...) would be cleaner.
            printf(indent.buffer()+indent.length()-(4*(depth-1)));
            printf("}\n");
            stack.removeLast();
            stack.removeLast();
            continue;
        }
        ++stack[stack.size()-1];
        Node& cur_node = myRoot[cur_nodei];
        INT_TYPE child_nodei = cur_node.child[cur_i];
        if (Node::isInternal(child_nodei)) {
            if (child_nodei == Node::EMPTY) {
                // EMPTY marks the end of this node's used children.
                printf(indent.buffer()+indent.length()-(4*(depth-1)));
                printf("}\n");
                stack.removeLast();
                stack.removeLast();
                continue;
            }
            // Internal child: print its header and descend.
            INT_TYPE internal_node = Node::getInternalNum(child_nodei);
            printf(indent.buffer()+indent.length()-(4*depth));
            printf("Node %u: {\n", uint(internal_node));
            stack.append(internal_node);
            stack.append(0);
            continue;
        }
        else {
            // Leaf child: print the item (triangle) index.
            printf(indent.buffer()+indent.length()-(4*depth));
            printf("Tri %u\n", uint(child_nodei));
        }
    }
}
#endif
  5320. } // UT namespace
  5321. } // End HDK_Sample namespace
  5322. }}
  5323. #endif
  5324. /*
  5325. * Copyright (c) 2018 Side Effects Software Inc.
  5326. *
  5327. * Permission is hereby granted, free of charge, to any person obtaining a copy
  5328. * of this software and associated documentation files (the "Software"), to deal
  5329. * in the Software without restriction, including without limitation the rights
  5330. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  5331. * copies of the Software, and to permit persons to whom the Software is
  5332. * furnished to do so, subject to the following conditions:
  5333. *
  5334. * The above copyright notice and this permission notice shall be included in all
  5335. * copies or substantial portions of the Software.
  5336. *
  5337. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  5338. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  5339. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  5340. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  5341. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  5342. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  5343. * SOFTWARE.
  5344. *
  5345. * COMMENTS:
  5346. * Functions and structures for computing solid angles.
  5347. */
  5348. #pragma once
  5349. #ifndef __HDK_UT_SolidAngle_h__
  5350. #define __HDK_UT_SolidAngle_h__
  5351. #include <memory>
  5352. namespace igl {
  5353. /// @private
  5354. namespace FastWindingNumber {
  5355. namespace HDK_Sample {
// Fixed-size 2D/3D vector aliases used throughout the solid-angle code.
template<typename T>
using UT_Vector2T = UT_FixedVector<T,2>;
template<typename T>
using UT_Vector3T = UT_FixedVector<T,3>;
  5360. template <typename T>
  5361. SYS_FORCE_INLINE T cross(const UT_Vector2T<T> &v1, const UT_Vector2T<T> &v2)
  5362. {
  5363. return v1[0]*v2[1] - v1[1]*v2[0];
  5364. }
  5365. template <typename T>
  5366. SYS_FORCE_INLINE
  5367. UT_Vector3T<T> cross(const UT_Vector3T<T> &v1, const UT_Vector3T<T> &v2)
  5368. {
  5369. UT_Vector3T<T> result;
  5370. // compute the cross product:
  5371. result[0] = v1[1]*v2[2] - v1[2]*v2[1];
  5372. result[1] = v1[2]*v2[0] - v1[0]*v2[2];
  5373. result[2] = v1[0]*v2[1] - v1[1]*v2[0];
  5374. return result;
  5375. }
  5376. /// Returns the signed solid angle subtended by triangle abc
  5377. /// from query point.
  5378. ///
  5379. /// WARNING: This uses the right-handed normal convention, whereas most of
  5380. /// Houdini uses the left-handed normal convention, so either
  5381. /// negate the output, or swap b and c if you want it to be
  5382. /// positive inside and negative outside.
  5383. template<typename T>
  5384. inline T UTsignedSolidAngleTri(
  5385. const UT_Vector3T<T> &a,
  5386. const UT_Vector3T<T> &b,
  5387. const UT_Vector3T<T> &c,
  5388. const UT_Vector3T<T> &query)
  5389. {
  5390. // Make a, b, and c relative to query
  5391. UT_Vector3T<T> qa = a-query;
  5392. UT_Vector3T<T> qb = b-query;
  5393. UT_Vector3T<T> qc = c-query;
  5394. const T alength = qa.length();
  5395. const T blength = qb.length();
  5396. const T clength = qc.length();
  5397. // If any triangle vertices are coincident with query,
  5398. // query is on the surface, which we treat as no solid angle.
  5399. if (alength == 0 || blength == 0 || clength == 0)
  5400. return T(0);
  5401. // Normalize the vectors
  5402. qa /= alength;
  5403. qb /= blength;
  5404. qc /= clength;
  5405. // The formula on Wikipedia has roughly dot(qa,cross(qb,qc)),
  5406. // but that's unstable when qa, qb, and qc are very close,
  5407. // (e.g. if the input triangle was very far away).
  5408. // This should be equivalent, but more stable.
  5409. const T numerator = dot(qa, cross(qb-qa, qc-qa));
  5410. // If numerator is 0, regardless of denominator, query is on the
  5411. // surface, which we treat as no solid angle.
  5412. if (numerator == 0)
  5413. return T(0);
  5414. const T denominator = T(1) + dot(qa,qb) + dot(qa,qc) + dot(qb,qc);
  5415. return T(2)*SYSatan2(numerator, denominator);
  5416. }
/// Returns the signed solid angle subtended by quad abcd from the query
/// point, as the sum of two triangle contributions. The quad is split
/// along whichever diagonal is numerically safe for this query point.
/// Same right-handed normal convention caveat as UTsignedSolidAngleTri.
template<typename T>
inline T UTsignedSolidAngleQuad(
    const UT_Vector3T<T> &a,
    const UT_Vector3T<T> &b,
    const UT_Vector3T<T> &c,
    const UT_Vector3T<T> &d,
    const UT_Vector3T<T> &query)
{
    // Make a, b, c, and d relative to query
    UT_Vector3T<T> v[4] = {
        a-query,
        b-query,
        c-query,
        d-query
    };
    const T lengths[4] = {
        v[0].length(),
        v[1].length(),
        v[2].length(),
        v[3].length()
    };
    // If any quad vertices are coincident with query,
    // query is on the surface, which we treat as no solid angle.
    // We could add the contribution from the non-planar part,
    // but in the context of a mesh, we'd still miss some, like
    // we do in the triangle case.
    if (lengths[0] == T(0) || lengths[1] == T(0) || lengths[2] == T(0) || lengths[3] == T(0))
        return T(0);
    // Normalize the vectors (project onto the unit sphere around query).
    v[0] /= lengths[0];
    v[1] /= lengths[1];
    v[2] /= lengths[2];
    v[3] /= lengths[3];
    // Compute (unnormalized, but consistently-scaled) barycentric coordinates
    // for the query point inside the tetrahedron of points.
    // If 0 or 4 of the coordinates are positive, (or slightly negative), the
    // query is (approximately) inside, so the choice of triangulation matters.
    // Otherwise, the triangulation doesn't matter.
    const UT_Vector3T<T> diag02 = v[2]-v[0];
    const UT_Vector3T<T> diag13 = v[3]-v[1];
    const UT_Vector3T<T> v01 = v[1]-v[0];
    const UT_Vector3T<T> v23 = v[3]-v[2];
    T bary[4];
    bary[0] = dot(v[3],cross(v23,diag13));
    bary[1] = -dot(v[2],cross(v23,diag02));
    bary[2] = -dot(v[1],cross(v01,diag13));
    bary[3] = dot(v[0],cross(v01,diag02));
    // Dot products shared between both candidate triangulations.
    const T dot01 = dot(v[0],v[1]);
    const T dot12 = dot(v[1],v[2]);
    const T dot23 = dot(v[2],v[3]);
    const T dot30 = dot(v[3],v[0]);
    T omega = T(0);
    // Equation of a bilinear patch in barycentric coordinates of its
    // tetrahedron is x0*x2 = x1*x3. Less is one side; greater is other.
    if (bary[0]*bary[2] < bary[1]*bary[3])
    {
        // Split 0-2: triangles 0,1,2 and 0,2,3
        const T numerator012 = bary[3];
        const T numerator023 = bary[1];
        const T dot02 = dot(v[0],v[2]);
        // If numerator is 0, regardless of denominator, query is on the
        // surface, which we treat as no solid angle.
        if (numerator012 != T(0))
        {
            const T denominator012 = T(1) + dot01 + dot12 + dot02;
            omega = SYSatan2(numerator012, denominator012);
        }
        if (numerator023 != T(0))
        {
            const T denominator023 = T(1) + dot02 + dot23 + dot30;
            omega += SYSatan2(numerator023, denominator023);
        }
    }
    else
    {
        // Split 1-3: triangles 0,1,3 and 1,2,3
        const T numerator013 = -bary[2];
        const T numerator123 = -bary[0];
        const T dot13 = dot(v[1],v[3]);
        // If numerator is 0, regardless of denominator, query is on the
        // surface, which we treat as no solid angle.
        if (numerator013 != T(0))
        {
            const T denominator013 = T(1) + dot01 + dot13 + dot30;
            omega = SYSatan2(numerator013, denominator013);
        }
        if (numerator123 != T(0))
        {
            const T denominator123 = T(1) + dot12 + dot23 + dot13;
            omega += SYSatan2(numerator123, denominator123);
        }
    }
    // Each atan2 term is half a triangle's solid angle (cf. the factor of
    // two in UTsignedSolidAngleTri), so double the accumulated sum.
    return T(2)*omega;
}
  5511. /// Class for quickly approximating signed solid angle of a large mesh
  5512. /// from many query points. This is useful for computing the
  5513. /// generalized winding number at many points.
  5514. ///
  5515. /// NOTE: This is currently only instantiated for <float,float>.
template<typename T,typename S>
class UT_SolidAngle
{
public:
    /// This is outlined so that we don't need to include UT_BVHImpl.h
    inline UT_SolidAngle();
    /// This is outlined so that we don't need to include UT_BVHImpl.h
    inline ~UT_SolidAngle();
    /// NOTE: This does not take ownership over triangle_points or positions,
    ///       but does keep pointers to them, so the caller must keep them in
    ///       scope for the lifetime of this structure.
    UT_SolidAngle(
        const int ntriangles,
        const int *const triangle_points,
        const int npoints,
        const UT_Vector3T<S> *const positions,
        const int order = 2)
        : UT_SolidAngle()
    { init(ntriangles, triangle_points, npoints, positions, order); }
    /// Initialize the tree and data.
    /// NOTE: It is safe to call init on a UT_SolidAngle that has had init
    ///       called on it before, to re-initialize it.
    inline void init(
        const int ntriangles,
        const int *const triangle_points,
        const int npoints,
        const UT_Vector3T<S> *const positions,
        const int order = 2);
    /// Frees myTree and myData, and clears the rest.
    inline void clear();
    /// Returns true if this is clear
    bool isClear() const
    { return myNTriangles == 0; }
    /// Returns an approximation of the signed solid angle of the mesh from the specified query_point
    /// accuracy_scale is the value of (maxP/q) beyond which the approximation of the box will be used.
    inline T computeSolidAngle(const UT_Vector3T<T> &query_point, const T accuracy_scale = T(2.0)) const;
private:
    struct BoxData;                     // per-box approximation data; defined with the implementation
    static constexpr uint BVH_N = 4;    // branching factor of the BVH
    UT_BVH<BVH_N> myTree;               // bounding volume hierarchy over the triangles
    int myNBoxes;                       // number of BoxData entries owned by myData
    int myOrder;                        // approximation order passed to init()
    std::unique_ptr<BoxData[]> myData;  // owned per-box data for the tree
    int myNTriangles;                   // 0 iff cleared/uninitialized (see isClear())
    const int *myTrianglePoints;        // borrowed; presumably 3 point indices per triangle -- see init() impl
    int myNPoints;                      // number of entries in myPositions
    const UT_Vector3T<S> *myPositions;  // borrowed point positions (not owned)
};
  5564. template<typename T>
  5565. inline T UTsignedAngleSegment(
  5566. const UT_Vector2T<T> &a,
  5567. const UT_Vector2T<T> &b,
  5568. const UT_Vector2T<T> &query)
  5569. {
  5570. // Make a and b relative to query
  5571. UT_Vector2T<T> qa = a-query;
  5572. UT_Vector2T<T> qb = b-query;
  5573. // If any segment vertices are coincident with query,
  5574. // query is on the segment, which we treat as no angle.
  5575. if (qa.isZero() || qb.isZero())
  5576. return T(0);
  5577. // numerator = |qa||qb|sin(theta)
  5578. const T numerator = cross(qa, qb);
  5579. // If numerator is 0, regardless of denominator, query is on the
  5580. // surface, which we treat as no solid angle.
  5581. if (numerator == 0)
  5582. return T(0);
  5583. // denominator = |qa||qb|cos(theta)
  5584. const T denominator = dot(qa,qb);
  5585. // numerator/denominator = tan(theta)
  5586. return SYSatan2(numerator, denominator);
  5587. }
  5588. /// Class for quickly approximating signed subtended angle of a large curve
  5589. /// from many query points. This is useful for computing the
  5590. /// generalized winding number at many points.
  5591. ///
  5592. /// NOTE: This is currently only instantiated for <float,float>.
template<typename T,typename S>
class UT_SubtendedAngle
{
public:
    /// This is outlined so that we don't need to include UT_BVHImpl.h
    inline UT_SubtendedAngle();
    /// This is outlined so that we don't need to include UT_BVHImpl.h
    inline ~UT_SubtendedAngle();
    /// NOTE: This does not take ownership over segment_points or positions,
    ///       but does keep pointers to them, so the caller must keep them in
    ///       scope for the lifetime of this structure.
    UT_SubtendedAngle(
        const int nsegments,
        const int *const segment_points,
        const int npoints,
        const UT_Vector2T<S> *const positions,
        const int order = 2)
        : UT_SubtendedAngle()
    { init(nsegments, segment_points, npoints, positions, order); }
    /// Initialize the tree and data.
    /// NOTE: It is safe to call init on a UT_SubtendedAngle that has had init
    ///       called on it before, to re-initialize it.
    inline void init(
        const int nsegments,
        const int *const segment_points,
        const int npoints,
        const UT_Vector2T<S> *const positions,
        const int order = 2);
    /// Frees myTree and myData, and clears the rest.
    inline void clear();
    /// Returns true if this is clear
    bool isClear() const
    { return myNSegments == 0; }
    /// Returns an approximation of the signed solid angle of the mesh from the specified query_point
    /// accuracy_scale is the value of (maxP/q) beyond which the approximation of the box will be used.
    inline T computeAngle(const UT_Vector2T<T> &query_point, const T accuracy_scale = T(2.0)) const;
private:
    struct BoxData;                     // per-box approximation data; defined with the implementation
    static constexpr uint BVH_N = 4;    // branching factor of the BVH
    UT_BVH<BVH_N> myTree;               // bounding volume hierarchy over the segments
    int myNBoxes;                       // number of BoxData entries owned by myData
    int myOrder;                        // approximation order passed to init()
    std::unique_ptr<BoxData[]> myData;  // owned per-box data for the tree
    int myNSegments;                    // 0 iff cleared/uninitialized (see isClear())
    const int *mySegmentPoints;         // borrowed; presumably 2 point indices per segment -- see init() impl
    int myNPoints;                      // number of entries in myPositions
    const UT_Vector2T<S> *myPositions;  // borrowed point positions (not owned)
};
  5641. } // End HDK_Sample namespace
  5642. }}
  5643. #endif
  5644. /*
  5645. * Copyright (c) 2018 Side Effects Software Inc.
  5646. *
  5647. * Permission is hereby granted, free of charge, to any person obtaining a copy
  5648. * of this software and associated documentation files (the "Software"), to deal
  5649. * in the Software without restriction, including without limitation the rights
  5650. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  5651. * copies of the Software, and to permit persons to whom the Software is
  5652. * furnished to do so, subject to the following conditions:
  5653. *
  5654. * The above copyright notice and this permission notice shall be included in all
  5655. * copies or substantial portions of the Software.
  5656. *
  5657. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  5658. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  5659. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  5660. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  5661. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  5662. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  5663. * SOFTWARE.
  5664. *
  5665. * COMMENTS:
  5666. * A wrapper function for the "free" function, used by UT_(Small)Array
  5667. */
  5668. #include <stdlib.h>
  5669. namespace igl {
  5670. /// @private
  5671. namespace FastWindingNumber {
// This needs to be here or else the warning suppression doesn't work because
// the templated calling code won't otherwise be compiled until after we've
// already popped the warning state. So we just always disable this at file
// scope here.
  5676. #if defined(__GNUC__) && !defined(__clang__)
  5677. _Pragma("GCC diagnostic push")
  5678. _Pragma("GCC diagnostic ignored \"-Wfree-nonheap-object\"")
  5679. #endif
  5680. inline void ut_ArrayImplFree(void *p)
  5681. {
  5682. free(p);
  5683. }
  5684. #if defined(__GNUC__) && !defined(__clang__)
  5685. _Pragma("GCC diagnostic pop")
  5686. #endif
  5687. } }
  5688. /*
  5689. * Copyright (c) 2018 Side Effects Software Inc.
  5690. *
  5691. * Permission is hereby granted, free of charge, to any person obtaining a copy
  5692. * of this software and associated documentation files (the "Software"), to deal
  5693. * in the Software without restriction, including without limitation the rights
  5694. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  5695. * copies of the Software, and to permit persons to whom the Software is
  5696. * furnished to do so, subject to the following conditions:
  5697. *
  5698. * The above copyright notice and this permission notice shall be included in all
  5699. * copies or substantial portions of the Software.
  5700. *
  5701. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  5702. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  5703. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  5704. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  5705. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  5706. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  5707. * SOFTWARE.
  5708. *
  5709. * COMMENTS:
  5710. * Functions and structures for computing solid angles.
  5711. */
  5712. #include "parallel_for.h"
  5713. #include <type_traits>
  5714. #include <utility>
  5715. #define SOLID_ANGLE_TIME_PRECOMPUTE 0
  5716. #if SOLID_ANGLE_TIME_PRECOMPUTE
  5717. #include <UT/UT_StopWatch.h>
  5718. #endif
  5719. #define SOLID_ANGLE_DEBUG 0
  5720. #if SOLID_ANGLE_DEBUG
  5721. #include <UT/UT_Debug.h>
  5722. #endif
  5723. #define TAYLOR_SERIES_ORDER 2
  5724. namespace igl {
  5725. /// @private
  5726. namespace FastWindingNumber {
  5727. namespace HDK_Sample {
// Per-BVH-node coefficients for the far-field solid-angle approximation.
// Each scalar member holds one value per child of the node (BVH_N lanes),
// filled in by PrecomputeFunctors::post in init().
template<typename T,typename S>
struct UT_SolidAngle<T,S>::BoxData
{
    void clear()
    {
        // Set everything to zero
        // NOTE(review): raw memset assumes all member types (v4uf /
        // UT_FixedVector) are trivially copyable — appears to hold here,
        // but verify if Type/SType ever change.
        memset(this,0,sizeof(*this));
    }
    // When the BVH is 4-wide and the scalar type is float, store the four
    // per-child values in a single SIMD vector (v4uf); otherwise fall back
    // to a plain fixed-size array.
    using Type = typename std::conditional<BVH_N==4 && std::is_same<T,float>::value, v4uf, UT_FixedVector<T,BVH_N>>::type;
    using SType = typename std::conditional<BVH_N==4 && std::is_same<S,float>::value, v4uf, UT_FixedVector<S,BVH_N>>::type;
    /// An upper bound on the squared distance from myAverageP to the farthest point in the box.
    SType myMaxPDist2;
    /// Centre of mass of the mesh surface in this box
    UT_FixedVector<Type,3> myAverageP;
    /// Unnormalized, area-weighted normal of the mesh in this box
    UT_FixedVector<Type,3> myN;
#if TAYLOR_SERIES_ORDER >= 1
    /// Values for Omega_1 (coefficients of the order-1 expansion term)
    /// @{
    UT_FixedVector<Type,3> myNijDiag; // Nxx, Nyy, Nzz
    Type myNxy_Nyx; // Nxy+Nyx
    Type myNyz_Nzy; // Nyz+Nzy
    Type myNzx_Nxz; // Nzx+Nxz
    /// @}
#endif
#if TAYLOR_SERIES_ORDER >= 2
    /// Values for Omega_2 (coefficients of the order-2 expansion term)
    /// @{
    UT_FixedVector<Type,3> myNijkDiag; // Nxxx, Nyyy, Nzzz
    Type mySumPermuteNxyz; // (Nxyz+Nxzy+Nyzx+Nyxz+Nzxy+Nzyx) = 2*(Nxyz+Nyzx+Nzxy)
    Type my2Nxxy_Nyxx; // Nxxy+Nxyx+Nyxx = 2Nxxy+Nyxx
    Type my2Nxxz_Nzxx; // Nxxz+Nxzx+Nzxx = 2Nxxz+Nzxx
    Type my2Nyyz_Nzyy; // Nyyz+Nyzy+Nzyy = 2Nyyz+Nzyy
    Type my2Nyyx_Nxyy; // Nyyx+Nyxy+Nxyy = 2Nyyx+Nxyy
    Type my2Nzzx_Nxzz; // Nzzx+Nzxz+Nxzz = 2Nzzx+Nxzz
    Type my2Nzzy_Nyzz; // Nzzy+Nzyz+Nyzz = 2Nzzy+Nyzz
    /// @}
#endif
};
// Default constructor: leaves the structure in the "clear" state — empty
// tree, no coefficient data, and no referenced geometry. Order defaults
// to 2 (full second-order Taylor expansion).
template<typename T,typename S>
inline UT_SolidAngle<T,S>::UT_SolidAngle()
    : myTree()
    , myNBoxes(0)
    , myOrder(2)
    , myData(nullptr)
    , myNTriangles(0)
    , myTrianglePoints(nullptr)
    , myNPoints(0)
    , myPositions(nullptr)
{}
// Destructor: default member destruction suffices (myData's unique_ptr
// releases the BoxData array; myTree cleans itself up).
template<typename T,typename S>
inline UT_SolidAngle<T,S>::~UT_SolidAngle()
{
    // Default destruction works, but this needs to be outlined
    // to avoid having to include UT_BVHImpl.h in the header,
    // (for the UT_UniquePtr destructor.)
}
  5785. template<typename T,typename S>
  5786. inline void UT_SolidAngle<T,S>::init(
  5787. const int ntriangles,
  5788. const int *const triangle_points,
  5789. const int npoints,
  5790. const UT_Vector3T<S> *const positions,
  5791. const int order)
  5792. {
  5793. #if SOLID_ANGLE_DEBUG
  5794. UTdebugFormat("");
  5795. UTdebugFormat("");
  5796. UTdebugFormat("Building BVH for {} ntriangles on {} points:", ntriangles, npoints);
  5797. #endif
  5798. myOrder = order;
  5799. myNTriangles = ntriangles;
  5800. myTrianglePoints = triangle_points;
  5801. myNPoints = npoints;
  5802. myPositions = positions;
  5803. #if SOLID_ANGLE_TIME_PRECOMPUTE
  5804. UT_StopWatch timer;
  5805. timer.start();
  5806. #endif
  5807. UT_SmallArray<UT::Box<S,3>> triangle_boxes;
  5808. triangle_boxes.setSizeNoInit(ntriangles);
  5809. if (ntriangles < 16*1024)
  5810. {
  5811. const int *cur_triangle_points = triangle_points;
  5812. for (int i = 0; i < ntriangles; ++i, cur_triangle_points += 3)
  5813. {
  5814. UT::Box<S,3> &box = triangle_boxes[i];
  5815. box.initBounds(positions[cur_triangle_points[0]]);
  5816. box.enlargeBounds(positions[cur_triangle_points[1]]);
  5817. box.enlargeBounds(positions[cur_triangle_points[2]]);
  5818. }
  5819. }
  5820. else
  5821. {
  5822. igl::parallel_for(ntriangles,
  5823. [triangle_points,&triangle_boxes,positions](int i)
  5824. {
  5825. const int *cur_triangle_points = triangle_points + i*3;
  5826. UT::Box<S,3> &box = triangle_boxes[i];
  5827. box.initBounds(positions[cur_triangle_points[0]]);
  5828. box.enlargeBounds(positions[cur_triangle_points[1]]);
  5829. box.enlargeBounds(positions[cur_triangle_points[2]]);
  5830. });
  5831. }
  5832. #if SOLID_ANGLE_TIME_PRECOMPUTE
  5833. double time = timer.stop();
  5834. UTdebugFormat("{} s to create bounding boxes.", time);
  5835. timer.start();
  5836. #endif
  5837. myTree.template init<UT::BVH_Heuristic::BOX_AREA,S,3>(triangle_boxes.array(), ntriangles);
  5838. #if SOLID_ANGLE_TIME_PRECOMPUTE
  5839. time = timer.stop();
  5840. UTdebugFormat("{} s to initialize UT_BVH structure. {} nodes", time, myTree.getNumNodes());
  5841. #endif
  5842. //myTree.debugDump();
  5843. const int nnodes = myTree.getNumNodes();
  5844. myNBoxes = nnodes;
  5845. BoxData *box_data = new BoxData[nnodes];
  5846. myData.reset(box_data);
  5847. // Some data are only needed during initialization.
  5848. struct LocalData
  5849. {
  5850. // Bounding box
  5851. UT::Box<S,3> myBox;
  5852. // P and N are needed from each child for computing Nij.
  5853. UT_Vector3T<T> myAverageP;
  5854. UT_Vector3T<T> myAreaP;
  5855. UT_Vector3T<T> myN;
  5856. // Unsigned area is needed for computing the average position.
  5857. T myArea;
  5858. #if TAYLOR_SERIES_ORDER >= 1
  5859. // These are needed for computing Nijk.
  5860. UT_Vector3T<T> myNijDiag;
  5861. T myNxy; T myNyx;
  5862. T myNyz; T myNzy;
  5863. T myNzx; T myNxz;
  5864. #endif
  5865. #if TAYLOR_SERIES_ORDER >= 2
  5866. UT_Vector3T<T> myNijkDiag; // Nxxx, Nyyy, Nzzz
  5867. T mySumPermuteNxyz; // (Nxyz+Nxzy+Nyzx+Nyxz+Nzxy+Nzyx) = 2*(Nxyz+Nyzx+Nzxy)
  5868. T my2Nxxy_Nyxx; // Nxxy+Nxyx+Nyxx = 2Nxxy+Nyxx
  5869. T my2Nxxz_Nzxx; // Nxxz+Nxzx+Nzxx = 2Nxxz+Nzxx
  5870. T my2Nyyz_Nzyy; // Nyyz+Nyzy+Nzyy = 2Nyyz+Nzyy
  5871. T my2Nyyx_Nxyy; // Nyyx+Nyxy+Nxyy = 2Nyyx+Nxyy
  5872. T my2Nzzx_Nxzz; // Nzzx+Nzxz+Nxzz = 2Nzzx+Nxzz
  5873. T my2Nzzy_Nyzz; // Nzzy+Nzyz+Nyzz = 2Nzzy+Nyzz
  5874. #endif
  5875. };
  5876. struct PrecomputeFunctors
  5877. {
  5878. BoxData *const myBoxData;
  5879. const UT::Box<S,3> *const myTriangleBoxes;
  5880. const int *const myTrianglePoints;
  5881. const UT_Vector3T<S> *const myPositions;
  5882. const int myOrder;
  5883. PrecomputeFunctors(
  5884. BoxData *box_data,
  5885. const UT::Box<S,3> *triangle_boxes,
  5886. const int *triangle_points,
  5887. const UT_Vector3T<S> *positions,
  5888. const int order)
  5889. : myBoxData(box_data)
  5890. , myTriangleBoxes(triangle_boxes)
  5891. , myTrianglePoints(triangle_points)
  5892. , myPositions(positions)
  5893. , myOrder(order)
  5894. {}
  5895. constexpr SYS_FORCE_INLINE bool pre(const int nodei, LocalData *data_for_parent) const
  5896. {
  5897. return true;
  5898. }
  5899. void item(const int itemi, const int parent_nodei, LocalData &data_for_parent) const
  5900. {
  5901. const UT_Vector3T<S> *const positions = myPositions;
  5902. const int *const cur_triangle_points = myTrianglePoints + 3*itemi;
  5903. const UT_Vector3T<T> a = positions[cur_triangle_points[0]];
  5904. const UT_Vector3T<T> b = positions[cur_triangle_points[1]];
  5905. const UT_Vector3T<T> c = positions[cur_triangle_points[2]];
  5906. const UT_Vector3T<T> ab = b-a;
  5907. const UT_Vector3T<T> ac = c-a;
  5908. const UT::Box<S,3> &triangle_box = myTriangleBoxes[itemi];
  5909. data_for_parent.myBox.initBounds(triangle_box.getMin(), triangle_box.getMax());
  5910. // Area-weighted normal (unnormalized)
  5911. const UT_Vector3T<T> N = T(0.5)*cross(ab,ac);
  5912. const T area2 = N.length2();
  5913. const T area = SYSsqrt(area2);
  5914. const UT_Vector3T<T> P = (a+b+c)/3;
  5915. data_for_parent.myAverageP = P;
  5916. data_for_parent.myAreaP = P*area;
  5917. data_for_parent.myN = N;
  5918. #if SOLID_ANGLE_DEBUG
  5919. UTdebugFormat("");
  5920. UTdebugFormat("Triangle {}: P = {}; N = {}; area = {}", itemi, P, N, area);
  5921. UTdebugFormat(" box = {}", data_for_parent.myBox);
  5922. #endif
  5923. data_for_parent.myArea = area;
  5924. #if TAYLOR_SERIES_ORDER >= 1
  5925. const int order = myOrder;
  5926. if (order < 1)
  5927. return;
  5928. // NOTE: Due to P being at the centroid, triangles have Nij = 0
  5929. // contributions to Nij.
  5930. data_for_parent.myNijDiag = T(0);
  5931. data_for_parent.myNxy = 0; data_for_parent.myNyx = 0;
  5932. data_for_parent.myNyz = 0; data_for_parent.myNzy = 0;
  5933. data_for_parent.myNzx = 0; data_for_parent.myNxz = 0;
  5934. #endif
  5935. #if TAYLOR_SERIES_ORDER >= 2
  5936. if (order < 2)
  5937. return;
  5938. // If it's zero-length, the results are zero, so we can skip.
  5939. if (area == 0)
  5940. {
  5941. data_for_parent.myNijkDiag = T(0);
  5942. data_for_parent.mySumPermuteNxyz = 0;
  5943. data_for_parent.my2Nxxy_Nyxx = 0;
  5944. data_for_parent.my2Nxxz_Nzxx = 0;
  5945. data_for_parent.my2Nyyz_Nzyy = 0;
  5946. data_for_parent.my2Nyyx_Nxyy = 0;
  5947. data_for_parent.my2Nzzx_Nxzz = 0;
  5948. data_for_parent.my2Nzzy_Nyzz = 0;
  5949. return;
  5950. }
  5951. // We need to use the NORMALIZED normal to multiply the integrals by.
  5952. UT_Vector3T<T> n = N/area;
  5953. // Figure out the order of a, b, and c in x, y, and z
  5954. // for use in computing the integrals for Nijk.
  5955. UT_Vector3T<T> values[3] = {a, b, c};
  5956. int order_x[3] = {0,1,2};
  5957. if (a[0] > b[0])
  5958. std::swap(order_x[0],order_x[1]);
  5959. if (values[order_x[0]][0] > c[0])
  5960. std::swap(order_x[0],order_x[2]);
  5961. if (values[order_x[1]][0] > values[order_x[2]][0])
  5962. std::swap(order_x[1],order_x[2]);
  5963. T dx = values[order_x[2]][0] - values[order_x[0]][0];
  5964. int order_y[3] = {0,1,2};
  5965. if (a[1] > b[1])
  5966. std::swap(order_y[0],order_y[1]);
  5967. if (values[order_y[0]][1] > c[1])
  5968. std::swap(order_y[0],order_y[2]);
  5969. if (values[order_y[1]][1] > values[order_y[2]][1])
  5970. std::swap(order_y[1],order_y[2]);
  5971. T dy = values[order_y[2]][1] - values[order_y[0]][1];
  5972. int order_z[3] = {0,1,2};
  5973. if (a[2] > b[2])
  5974. std::swap(order_z[0],order_z[1]);
  5975. if (values[order_z[0]][2] > c[2])
  5976. std::swap(order_z[0],order_z[2]);
  5977. if (values[order_z[1]][2] > values[order_z[2]][2])
  5978. std::swap(order_z[1],order_z[2]);
  5979. T dz = values[order_z[2]][2] - values[order_z[0]][2];
  5980. auto &&compute_integrals = [](
  5981. const UT_Vector3T<T> &a,
  5982. const UT_Vector3T<T> &b,
  5983. const UT_Vector3T<T> &c,
  5984. const UT_Vector3T<T> &P,
  5985. T *integral_ii,
  5986. T *integral_ij,
  5987. T *integral_ik,
  5988. const int i)
  5989. {
  5990. #if SOLID_ANGLE_DEBUG
  5991. UTdebugFormat(" Splitting on {}; a = {}; b = {}; c = {}", char('x'+i), a, b, c);
  5992. #endif
  5993. // NOTE: a, b, and c must be in order of the i axis.
  5994. // We're splitting the triangle at the middle i coordinate.
  5995. const UT_Vector3T<T> oab = b - a;
  5996. const UT_Vector3T<T> oac = c - a;
  5997. const UT_Vector3T<T> ocb = b - c;
  5998. UT_ASSERT_MSG_P(oac[i] > 0, "This should have been checked by the caller.");
  5999. const T t = oab[i]/oac[i];
  6000. UT_ASSERT_MSG_P(t >= 0 && t <= 1, "Either sorting must have gone wrong, or there are input NaNs.");
  6001. const int j = (i==2) ? 0 : (i+1);
  6002. const int k = (j==2) ? 0 : (j+1);
  6003. const T jdiff = t*oac[j] - oab[j];
  6004. const T kdiff = t*oac[k] - oab[k];
  6005. UT_Vector3T<T> cross_a;
  6006. cross_a[0] = (jdiff*oab[k] - kdiff*oab[j]);
  6007. cross_a[1] = kdiff*oab[i];
  6008. cross_a[2] = jdiff*oab[i];
  6009. UT_Vector3T<T> cross_c;
  6010. cross_c[0] = (jdiff*ocb[k] - kdiff*ocb[j]);
  6011. cross_c[1] = kdiff*ocb[i];
  6012. cross_c[2] = jdiff*ocb[i];
  6013. const T area_scale_a = cross_a.length();
  6014. const T area_scale_c = cross_c.length();
  6015. const T Pai = a[i] - P[i];
  6016. const T Pci = c[i] - P[i];
  6017. // Integral over the area of the triangle of (pi^2)dA,
  6018. // by splitting the triangle into two at b, the a side
  6019. // and the c side.
  6020. const T int_ii_a = area_scale_a*(T(0.5)*Pai*Pai + T(2.0/3.0)*Pai*oab[i] + T(0.25)*oab[i]*oab[i]);
  6021. const T int_ii_c = area_scale_c*(T(0.5)*Pci*Pci + T(2.0/3.0)*Pci*ocb[i] + T(0.25)*ocb[i]*ocb[i]);
  6022. *integral_ii = int_ii_a + int_ii_c;
  6023. #if SOLID_ANGLE_DEBUG
  6024. UTdebugFormat(" integral_{}{}_a = {}; integral_{}{}_c = {}", char('x'+i), char('x'+i), int_ii_a, char('x'+i), char('x'+i), int_ii_c);
  6025. #endif
  6026. int jk = j;
  6027. T *integral = integral_ij;
  6028. T diff = jdiff;
  6029. while (true) // This only does 2 iterations, one for j and one for k
  6030. {
  6031. if (integral)
  6032. {
  6033. T obmidj = b[jk] + T(0.5)*diff;
  6034. T oabmidj = obmidj - a[jk];
  6035. T ocbmidj = obmidj - c[jk];
  6036. T Paj = a[jk] - P[jk];
  6037. T Pcj = c[jk] - P[jk];
  6038. // Integral over the area of the triangle of (pi*pj)dA
  6039. const T int_ij_a = area_scale_a*(T(0.5)*Pai*Paj + T(1.0/3.0)*Pai*oabmidj + T(1.0/3.0)*Paj*oab[i] + T(0.25)*oab[i]*oabmidj);
  6040. const T int_ij_c = area_scale_c*(T(0.5)*Pci*Pcj + T(1.0/3.0)*Pci*ocbmidj + T(1.0/3.0)*Pcj*ocb[i] + T(0.25)*ocb[i]*ocbmidj);
  6041. *integral = int_ij_a + int_ij_c;
  6042. #if SOLID_ANGLE_DEBUG
  6043. UTdebugFormat(" integral_{}{}_a = {}; integral_{}{}_c = {}", char('x'+i), char('x'+jk), int_ij_a, char('x'+i), char('x'+jk), int_ij_c);
  6044. #endif
  6045. }
  6046. if (jk == k)
  6047. break;
  6048. jk = k;
  6049. integral = integral_ik;
  6050. diff = kdiff;
  6051. }
  6052. };
  6053. T integral_xx = 0;
  6054. T integral_xy = 0;
  6055. T integral_yy = 0;
  6056. T integral_yz = 0;
  6057. T integral_zz = 0;
  6058. T integral_zx = 0;
  6059. // Note that if the span of any axis is zero, the integral must be zero,
  6060. // since there's a factor of (p_i-P_i), i.e. value minus average,
  6061. // and every value must be equal to the average, giving zero.
  6062. if (dx > 0)
  6063. {
  6064. compute_integrals(
  6065. values[order_x[0]], values[order_x[1]], values[order_x[2]], P,
  6066. &integral_xx, ((dx >= dy && dy > 0) ? &integral_xy : nullptr), ((dx >= dz && dz > 0) ? &integral_zx : nullptr), 0);
  6067. }
  6068. if (dy > 0)
  6069. {
  6070. compute_integrals(
  6071. values[order_y[0]], values[order_y[1]], values[order_y[2]], P,
  6072. &integral_yy, ((dy >= dz && dz > 0) ? &integral_yz : nullptr), ((dx < dy && dx > 0) ? &integral_xy : nullptr), 1);
  6073. }
  6074. if (dz > 0)
  6075. {
  6076. compute_integrals(
  6077. values[order_z[0]], values[order_z[1]], values[order_z[2]], P,
  6078. &integral_zz, ((dx < dz && dx > 0) ? &integral_zx : nullptr), ((dy < dz && dy > 0) ? &integral_yz : nullptr), 2);
  6079. }
  6080. UT_Vector3T<T> Niii;
  6081. Niii[0] = integral_xx;
  6082. Niii[1] = integral_yy;
  6083. Niii[2] = integral_zz;
  6084. Niii *= n;
  6085. data_for_parent.myNijkDiag = Niii;
  6086. data_for_parent.mySumPermuteNxyz = 2*(n[0]*integral_yz + n[1]*integral_zx + n[2]*integral_xy);
  6087. T Nxxy = n[0]*integral_xy;
  6088. T Nxxz = n[0]*integral_zx;
  6089. T Nyyz = n[1]*integral_yz;
  6090. T Nyyx = n[1]*integral_xy;
  6091. T Nzzx = n[2]*integral_zx;
  6092. T Nzzy = n[2]*integral_yz;
  6093. data_for_parent.my2Nxxy_Nyxx = 2*Nxxy + n[1]*integral_xx;
  6094. data_for_parent.my2Nxxz_Nzxx = 2*Nxxz + n[2]*integral_xx;
  6095. data_for_parent.my2Nyyz_Nzyy = 2*Nyyz + n[2]*integral_yy;
  6096. data_for_parent.my2Nyyx_Nxyy = 2*Nyyx + n[0]*integral_yy;
  6097. data_for_parent.my2Nzzx_Nxzz = 2*Nzzx + n[0]*integral_zz;
  6098. data_for_parent.my2Nzzy_Nyzz = 2*Nzzy + n[1]*integral_zz;
  6099. #if SOLID_ANGLE_DEBUG
  6100. UTdebugFormat(" integral_xx = {}; yy = {}; zz = {}", integral_xx, integral_yy, integral_zz);
  6101. UTdebugFormat(" integral_xy = {}; yz = {}; zx = {}", integral_xy, integral_yz, integral_zx);
  6102. #endif
  6103. #endif
  6104. }
  6105. void post(const int nodei, const int parent_nodei, LocalData *data_for_parent, const int nchildren, const LocalData *child_data_array) const
  6106. {
  6107. // NOTE: Although in the general case, data_for_parent may be null for the root call,
  6108. // this functor assumes that it's non-null, so the call below must pass a non-null pointer.
  6109. BoxData &current_box_data = myBoxData[nodei];
  6110. UT_Vector3T<T> N = child_data_array[0].myN;
  6111. ((T*)&current_box_data.myN[0])[0] = N[0];
  6112. ((T*)&current_box_data.myN[1])[0] = N[1];
  6113. ((T*)&current_box_data.myN[2])[0] = N[2];
  6114. UT_Vector3T<T> areaP = child_data_array[0].myAreaP;
  6115. T area = child_data_array[0].myArea;
  6116. UT_Vector3T<T> local_P = child_data_array[0].myAverageP;
  6117. ((T*)&current_box_data.myAverageP[0])[0] = local_P[0];
  6118. ((T*)&current_box_data.myAverageP[1])[0] = local_P[1];
  6119. ((T*)&current_box_data.myAverageP[2])[0] = local_P[2];
  6120. for (int i = 1; i < nchildren; ++i)
  6121. {
  6122. const UT_Vector3T<T> local_N = child_data_array[i].myN;
  6123. N += local_N;
  6124. ((T*)&current_box_data.myN[0])[i] = local_N[0];
  6125. ((T*)&current_box_data.myN[1])[i] = local_N[1];
  6126. ((T*)&current_box_data.myN[2])[i] = local_N[2];
  6127. areaP += child_data_array[i].myAreaP;
  6128. area += child_data_array[i].myArea;
  6129. const UT_Vector3T<T> local_P = child_data_array[i].myAverageP;
  6130. ((T*)&current_box_data.myAverageP[0])[i] = local_P[0];
  6131. ((T*)&current_box_data.myAverageP[1])[i] = local_P[1];
  6132. ((T*)&current_box_data.myAverageP[2])[i] = local_P[2];
  6133. }
  6134. for (int i = nchildren; i < BVH_N; ++i)
  6135. {
  6136. // Set to zero, just to avoid false positives for uses of uninitialized memory.
  6137. ((T*)&current_box_data.myN[0])[i] = 0;
  6138. ((T*)&current_box_data.myN[1])[i] = 0;
  6139. ((T*)&current_box_data.myN[2])[i] = 0;
  6140. ((T*)&current_box_data.myAverageP[0])[i] = 0;
  6141. ((T*)&current_box_data.myAverageP[1])[i] = 0;
  6142. ((T*)&current_box_data.myAverageP[2])[i] = 0;
  6143. }
  6144. data_for_parent->myN = N;
  6145. data_for_parent->myAreaP = areaP;
  6146. data_for_parent->myArea = area;
  6147. UT::Box<S,3> box(child_data_array[0].myBox);
  6148. for (int i = 1; i < nchildren; ++i)
  6149. box.enlargeBounds(child_data_array[i].myBox);
  6150. // Normalize P
  6151. UT_Vector3T<T> averageP;
  6152. if (area > 0)
  6153. averageP = areaP/area;
  6154. else
  6155. averageP = T(0.5)*(box.getMin() + box.getMax());
  6156. data_for_parent->myAverageP = averageP;
  6157. data_for_parent->myBox = box;
  6158. for (int i = 0; i < nchildren; ++i)
  6159. {
  6160. const UT::Box<S,3> &local_box(child_data_array[i].myBox);
  6161. const UT_Vector3T<T> &local_P = child_data_array[i].myAverageP;
  6162. const UT_Vector3T<T> maxPDiff = SYSmax(local_P-UT_Vector3T<T>(local_box.getMin()), UT_Vector3T<T>(local_box.getMax())-local_P);
  6163. ((T*)&current_box_data.myMaxPDist2)[i] = maxPDiff.length2();
  6164. }
  6165. for (int i = nchildren; i < BVH_N; ++i)
  6166. {
  6167. // This child is non-existent. If we set myMaxPDist2 to infinity, it will never
  6168. // use the approximation, and the traverseVector function can check for EMPTY.
  6169. ((T*)&current_box_data.myMaxPDist2)[i] = std::numeric_limits<T>::infinity();
  6170. }
  6171. #if TAYLOR_SERIES_ORDER >= 1
  6172. const int order = myOrder;
  6173. if (order >= 1)
  6174. {
  6175. // We now have the current box's P, so we can adjust Nij and Nijk
  6176. data_for_parent->myNijDiag = child_data_array[0].myNijDiag;
  6177. data_for_parent->myNxy = 0;
  6178. data_for_parent->myNyx = 0;
  6179. data_for_parent->myNyz = 0;
  6180. data_for_parent->myNzy = 0;
  6181. data_for_parent->myNzx = 0;
  6182. data_for_parent->myNxz = 0;
  6183. #if TAYLOR_SERIES_ORDER >= 2
  6184. data_for_parent->myNijkDiag = child_data_array[0].myNijkDiag;
  6185. data_for_parent->mySumPermuteNxyz = child_data_array[0].mySumPermuteNxyz;
  6186. data_for_parent->my2Nxxy_Nyxx = child_data_array[0].my2Nxxy_Nyxx;
  6187. data_for_parent->my2Nxxz_Nzxx = child_data_array[0].my2Nxxz_Nzxx;
  6188. data_for_parent->my2Nyyz_Nzyy = child_data_array[0].my2Nyyz_Nzyy;
  6189. data_for_parent->my2Nyyx_Nxyy = child_data_array[0].my2Nyyx_Nxyy;
  6190. data_for_parent->my2Nzzx_Nxzz = child_data_array[0].my2Nzzx_Nxzz;
  6191. data_for_parent->my2Nzzy_Nyzz = child_data_array[0].my2Nzzy_Nyzz;
  6192. #endif
  6193. for (int i = 1; i < nchildren; ++i)
  6194. {
  6195. data_for_parent->myNijDiag += child_data_array[i].myNijDiag;
  6196. #if TAYLOR_SERIES_ORDER >= 2
  6197. data_for_parent->myNijkDiag += child_data_array[i].myNijkDiag;
  6198. data_for_parent->mySumPermuteNxyz += child_data_array[i].mySumPermuteNxyz;
  6199. data_for_parent->my2Nxxy_Nyxx += child_data_array[i].my2Nxxy_Nyxx;
  6200. data_for_parent->my2Nxxz_Nzxx += child_data_array[i].my2Nxxz_Nzxx;
  6201. data_for_parent->my2Nyyz_Nzyy += child_data_array[i].my2Nyyz_Nzyy;
  6202. data_for_parent->my2Nyyx_Nxyy += child_data_array[i].my2Nyyx_Nxyy;
  6203. data_for_parent->my2Nzzx_Nxzz += child_data_array[i].my2Nzzx_Nxzz;
  6204. data_for_parent->my2Nzzy_Nyzz += child_data_array[i].my2Nzzy_Nyzz;
  6205. #endif
  6206. }
  6207. for (int j = 0; j < 3; ++j)
  6208. ((T*)&current_box_data.myNijDiag[j])[0] = child_data_array[0].myNijDiag[j];
  6209. ((T*)&current_box_data.myNxy_Nyx)[0] = child_data_array[0].myNxy + child_data_array[0].myNyx;
  6210. ((T*)&current_box_data.myNyz_Nzy)[0] = child_data_array[0].myNyz + child_data_array[0].myNzy;
  6211. ((T*)&current_box_data.myNzx_Nxz)[0] = child_data_array[0].myNzx + child_data_array[0].myNxz;
  6212. for (int j = 0; j < 3; ++j)
  6213. ((T*)&current_box_data.myNijkDiag[j])[0] = child_data_array[0].myNijkDiag[j];
  6214. ((T*)&current_box_data.mySumPermuteNxyz)[0] = child_data_array[0].mySumPermuteNxyz;
  6215. ((T*)&current_box_data.my2Nxxy_Nyxx)[0] = child_data_array[0].my2Nxxy_Nyxx;
  6216. ((T*)&current_box_data.my2Nxxz_Nzxx)[0] = child_data_array[0].my2Nxxz_Nzxx;
  6217. ((T*)&current_box_data.my2Nyyz_Nzyy)[0] = child_data_array[0].my2Nyyz_Nzyy;
  6218. ((T*)&current_box_data.my2Nyyx_Nxyy)[0] = child_data_array[0].my2Nyyx_Nxyy;
  6219. ((T*)&current_box_data.my2Nzzx_Nxzz)[0] = child_data_array[0].my2Nzzx_Nxzz;
  6220. ((T*)&current_box_data.my2Nzzy_Nyzz)[0] = child_data_array[0].my2Nzzy_Nyzz;
  6221. for (int i = 1; i < nchildren; ++i)
  6222. {
  6223. for (int j = 0; j < 3; ++j)
  6224. ((T*)&current_box_data.myNijDiag[j])[i] = child_data_array[i].myNijDiag[j];
  6225. ((T*)&current_box_data.myNxy_Nyx)[i] = child_data_array[i].myNxy + child_data_array[i].myNyx;
  6226. ((T*)&current_box_data.myNyz_Nzy)[i] = child_data_array[i].myNyz + child_data_array[i].myNzy;
  6227. ((T*)&current_box_data.myNzx_Nxz)[i] = child_data_array[i].myNzx + child_data_array[i].myNxz;
  6228. for (int j = 0; j < 3; ++j)
  6229. ((T*)&current_box_data.myNijkDiag[j])[i] = child_data_array[i].myNijkDiag[j];
  6230. ((T*)&current_box_data.mySumPermuteNxyz)[i] = child_data_array[i].mySumPermuteNxyz;
  6231. ((T*)&current_box_data.my2Nxxy_Nyxx)[i] = child_data_array[i].my2Nxxy_Nyxx;
  6232. ((T*)&current_box_data.my2Nxxz_Nzxx)[i] = child_data_array[i].my2Nxxz_Nzxx;
  6233. ((T*)&current_box_data.my2Nyyz_Nzyy)[i] = child_data_array[i].my2Nyyz_Nzyy;
  6234. ((T*)&current_box_data.my2Nyyx_Nxyy)[i] = child_data_array[i].my2Nyyx_Nxyy;
  6235. ((T*)&current_box_data.my2Nzzx_Nxzz)[i] = child_data_array[i].my2Nzzx_Nxzz;
  6236. ((T*)&current_box_data.my2Nzzy_Nyzz)[i] = child_data_array[i].my2Nzzy_Nyzz;
  6237. }
  6238. for (int i = nchildren; i < BVH_N; ++i)
  6239. {
  6240. // Set to zero, just to avoid false positives for uses of uninitialized memory.
  6241. for (int j = 0; j < 3; ++j)
  6242. ((T*)&current_box_data.myNijDiag[j])[i] = 0;
  6243. ((T*)&current_box_data.myNxy_Nyx)[i] = 0;
  6244. ((T*)&current_box_data.myNyz_Nzy)[i] = 0;
  6245. ((T*)&current_box_data.myNzx_Nxz)[i] = 0;
  6246. for (int j = 0; j < 3; ++j)
  6247. ((T*)&current_box_data.myNijkDiag[j])[i] = 0;
  6248. ((T*)&current_box_data.mySumPermuteNxyz)[i] = 0;
  6249. ((T*)&current_box_data.my2Nxxy_Nyxx)[i] = 0;
  6250. ((T*)&current_box_data.my2Nxxz_Nzxx)[i] = 0;
  6251. ((T*)&current_box_data.my2Nyyz_Nzyy)[i] = 0;
  6252. ((T*)&current_box_data.my2Nyyx_Nxyy)[i] = 0;
  6253. ((T*)&current_box_data.my2Nzzx_Nxzz)[i] = 0;
  6254. ((T*)&current_box_data.my2Nzzy_Nyzz)[i] = 0;
  6255. }
  6256. for (int i = 0; i < nchildren; ++i)
  6257. {
  6258. const LocalData &child_data = child_data_array[i];
  6259. UT_Vector3T<T> displacement = child_data.myAverageP - UT_Vector3T<T>(data_for_parent->myAverageP);
  6260. UT_Vector3T<T> N = child_data.myN;
  6261. // Adjust Nij for the change in centre P
  6262. data_for_parent->myNijDiag += N*displacement;
  6263. T Nxy = child_data.myNxy + N[0]*displacement[1];
  6264. T Nyx = child_data.myNyx + N[1]*displacement[0];
  6265. T Nyz = child_data.myNyz + N[1]*displacement[2];
  6266. T Nzy = child_data.myNzy + N[2]*displacement[1];
  6267. T Nzx = child_data.myNzx + N[2]*displacement[0];
  6268. T Nxz = child_data.myNxz + N[0]*displacement[2];
  6269. data_for_parent->myNxy += Nxy;
  6270. data_for_parent->myNyx += Nyx;
  6271. data_for_parent->myNyz += Nyz;
  6272. data_for_parent->myNzy += Nzy;
  6273. data_for_parent->myNzx += Nzx;
  6274. data_for_parent->myNxz += Nxz;
  6275. #if TAYLOR_SERIES_ORDER >= 2
  6276. if (order >= 2)
  6277. {
  6278. // Adjust Nijk for the change in centre P
  6279. data_for_parent->myNijkDiag += T(2)*displacement*child_data.myNijDiag + displacement*displacement*child_data.myN;
  6280. data_for_parent->mySumPermuteNxyz += (displacement[0]*(Nyz+Nzy) + displacement[1]*(Nzx+Nxz) + displacement[2]*(Nxy+Nyx));
  6281. data_for_parent->my2Nxxy_Nyxx +=
  6282. 2*(displacement[1]*child_data.myNijDiag[0] + displacement[0]*child_data.myNxy + N[0]*displacement[0]*displacement[1])
  6283. + 2*child_data.myNyx*displacement[0] + N[1]*displacement[0]*displacement[0];
  6284. data_for_parent->my2Nxxz_Nzxx +=
  6285. 2*(displacement[2]*child_data.myNijDiag[0] + displacement[0]*child_data.myNxz + N[0]*displacement[0]*displacement[2])
  6286. + 2*child_data.myNzx*displacement[0] + N[2]*displacement[0]*displacement[0];
  6287. data_for_parent->my2Nyyz_Nzyy +=
  6288. 2*(displacement[2]*child_data.myNijDiag[1] + displacement[1]*child_data.myNyz + N[1]*displacement[1]*displacement[2])
  6289. + 2*child_data.myNzy*displacement[1] + N[2]*displacement[1]*displacement[1];
  6290. data_for_parent->my2Nyyx_Nxyy +=
  6291. 2*(displacement[0]*child_data.myNijDiag[1] + displacement[1]*child_data.myNyx + N[1]*displacement[1]*displacement[0])
  6292. + 2*child_data.myNxy*displacement[1] + N[0]*displacement[1]*displacement[1];
  6293. data_for_parent->my2Nzzx_Nxzz +=
  6294. 2*(displacement[0]*child_data.myNijDiag[2] + displacement[2]*child_data.myNzx + N[2]*displacement[2]*displacement[0])
  6295. + 2*child_data.myNxz*displacement[2] + N[0]*displacement[2]*displacement[2];
  6296. data_for_parent->my2Nzzy_Nyzz +=
  6297. 2*(displacement[1]*child_data.myNijDiag[2] + displacement[2]*child_data.myNzy + N[2]*displacement[2]*displacement[1])
  6298. + 2*child_data.myNyz*displacement[2] + N[1]*displacement[2]*displacement[2];
  6299. }
  6300. #endif
  6301. }
  6302. }
  6303. #endif
  6304. #if SOLID_ANGLE_DEBUG
  6305. UTdebugFormat("");
  6306. UTdebugFormat("Node {}: nchildren = {}; maxP = {}", nodei, nchildren, SYSsqrt(current_box_data.myMaxPDist2));
  6307. UTdebugFormat(" P = {}; N = {}", current_box_data.myAverageP, current_box_data.myN);
  6308. #if TAYLOR_SERIES_ORDER >= 1
  6309. UTdebugFormat(" Nii = {}", current_box_data.myNijDiag);
  6310. UTdebugFormat(" Nxy+Nyx = {}; Nyz+Nzy = {}; Nyz+Nzy = {}", current_box_data.myNxy_Nyx, current_box_data.myNyz_Nzy, current_box_data.myNzx_Nxz);
  6311. #if TAYLOR_SERIES_ORDER >= 2
  6312. UTdebugFormat(" Niii = {}; 2(Nxyz+Nyzx+Nzxy) = {}", current_box_data.myNijkDiag, current_box_data.mySumPermuteNxyz);
  6313. UTdebugFormat(" 2Nxxy+Nyxx = {}; 2Nxxz+Nzxx = {}", current_box_data.my2Nxxy_Nyxx, current_box_data.my2Nxxz_Nzxx);
  6314. UTdebugFormat(" 2Nyyz+Nzyy = {}; 2Nyyx+Nxyy = {}", current_box_data.my2Nyyz_Nzyy, current_box_data.my2Nyyx_Nxyy);
  6315. UTdebugFormat(" 2Nzzx+Nxzz = {}; 2Nzzy+Nyzz = {}", current_box_data.my2Nzzx_Nxzz, current_box_data.my2Nzzy_Nyzz);
  6316. #endif
  6317. #endif
  6318. #endif
  6319. }
  6320. };
  6321. #if SOLID_ANGLE_TIME_PRECOMPUTE
  6322. timer.start();
  6323. #endif
  6324. const PrecomputeFunctors functors(box_data, triangle_boxes.array(), triangle_points, positions, order);
  6325. // NOTE: post-functor relies on non-null data_for_parent, so we have to pass one.
  6326. LocalData local_data;
  6327. myTree.template traverseParallel<LocalData>(4096, functors, &local_data);
  6328. //myTree.template traverse<LocalData>(functors);
  6329. #if SOLID_ANGLE_TIME_PRECOMPUTE
  6330. time = timer.stop();
  6331. UTdebugFormat("{} s to precompute coefficients.", time);
  6332. #endif
  6333. }
  6334. template<typename T,typename S>
  6335. inline void UT_SolidAngle<T, S>::clear()
  6336. {
  6337. myTree.clear();
  6338. myNBoxes = 0;
  6339. myOrder = 2;
  6340. myData.reset();
  6341. myNTriangles = 0;
  6342. myTrianglePoints = nullptr;
  6343. myNPoints = 0;
  6344. myPositions = nullptr;
  6345. }
template<typename T,typename S>
inline T UT_SolidAngle<T, S>::computeSolidAngle(const UT_Vector3T<T> &query_point, const T accuracy_scale) const
{
    // The descend test below compares squared distances, so square the scale once up front.
    const T accuracy_scale2 = accuracy_scale*accuracy_scale;
    // Traversal functors for the BVH:
    //  - pre():  per internal node, evaluates the far-field series approximation for all
    //            BVH_N children at once (SIMD lanes) and returns a bitmask of children
    //            that still need exact descent.
    //  - item(): per leaf triangle, computes the exact signed solid angle.
    //  - post(): sums child contributions into the parent's accumulator.
    struct SolidAngleFunctors
    {
        const BoxData *const myBoxData;          // per-node precomputed coefficients
        const UT_Vector3T<T> myQueryPoint;       // query position (copied by value)
        const T myAccuracyScale2;                // squared accuracy threshold factor
        const UT_Vector3T<S> *const myPositions; // mesh point positions (not owned)
        const int *const myTrianglePoints;       // 3 point indices per triangle (not owned)
        const int myOrder;                       // Taylor-series order actually requested (0..2)
        SolidAngleFunctors(
            const BoxData *const box_data,
            const UT_Vector3T<T> &query_point,
            const T accuracy_scale2,
            const int order,
            const UT_Vector3T<S> *const positions,
            const int *const triangle_points)
            : myBoxData(box_data)
            , myQueryPoint(query_point)
            , myAccuracyScale2(accuracy_scale2)
            , myOrder(order)
            , myPositions(positions)
            , myTrianglePoints(triangle_points)
        {}
        // Returns a bitmask of children that must be descended into; children whose bit
        // is clear have already been approximated and accumulated into *data_for_parent.
        uint pre(const int nodei, T *data_for_parent) const
        {
            const BoxData &data = myBoxData[nodei];
            const typename BoxData::Type maxP2 = data.myMaxPDist2;
            // q = query point relative to each child's average P, one child per SIMD lane.
            UT_FixedVector<typename BoxData::Type,3> q;
            q[0] = typename BoxData::Type(myQueryPoint[0]);
            q[1] = typename BoxData::Type(myQueryPoint[1]);
            q[2] = typename BoxData::Type(myQueryPoint[2]);
            q -= data.myAverageP;
            const typename BoxData::Type qlength2 = q[0]*q[0] + q[1]*q[1] + q[2]*q[2];
            // If the query point is within a factor of accuracy_scale of the box radius,
            // it's assumed to be not a good enough approximation, so it needs to descend.
            // TODO: Is there a way to estimate the error?
            static_assert((std::is_same<typename BoxData::Type,v4uf>::value), "FIXME: Implement support for other tuple types!");
            v4uu descend_mask = (qlength2 <= maxP2*myAccuracyScale2);
            // One bit per child lane; bit set => that child needs exact descent.
            uint descend_bitmask = _mm_movemask_ps(V4SF(descend_mask.vector));
            constexpr uint allchildbits = ((uint(1)<<BVH_N)-1);
            // Fast path: every child needs descending, so there's nothing to approximate here.
            if (descend_bitmask == allchildbits)
            {
                *data_for_parent = 0;
                return allchildbits;
            }
            // qlength2 must be non-zero, since it's strictly greater than something.
            // We still need to be careful for NaNs, though, because the 4th power might cause problems.
            const typename BoxData::Type qlength_m2 = typename BoxData::Type(1.0)/qlength2;
            const typename BoxData::Type qlength_m1 = sqrt(qlength_m2);
            // Normalize q to reduce issues with overflow/underflow, since we'd need the 7th power
            // if we didn't normalize, and (1e-6)^-7 = 1e42, which overflows single-precision.
            q *= qlength_m1;
            // Zeroth-order (dipole) term of the solid-angle expansion.
            typename BoxData::Type Omega_approx = -qlength_m2*dot(q,data.myN);
#if TAYLOR_SERIES_ORDER >= 1
            const int order = myOrder;
            if (order >= 1)
            {
                // First-order correction term (Omega_1), built from the Nij moments.
                const UT_FixedVector<typename BoxData::Type,3> q2 = q*q;
                const typename BoxData::Type qlength_m3 = qlength_m2*qlength_m1;
                const typename BoxData::Type Omega_1 =
                    qlength_m3*(data.myNijDiag[0] + data.myNijDiag[1] + data.myNijDiag[2]
                        -typename BoxData::Type(3.0)*(dot(q2,data.myNijDiag) +
                            q[0]*q[1]*data.myNxy_Nyx +
                            q[0]*q[2]*data.myNzx_Nxz +
                            q[1]*q[2]*data.myNyz_Nzy));
                Omega_approx += Omega_1;
#if TAYLOR_SERIES_ORDER >= 2
                if (order >= 2)
                {
                    // Second-order correction term (Omega_2), built from the Nijk moments.
                    const UT_FixedVector<typename BoxData::Type,3> q3 = q2*q;
                    const typename BoxData::Type qlength_m4 = qlength_m2*qlength_m2;
                    typename BoxData::Type temp0[3] = {
                        data.my2Nyyx_Nxyy+data.my2Nzzx_Nxzz,
                        data.my2Nzzy_Nyzz+data.my2Nxxy_Nyxx,
                        data.my2Nxxz_Nzxx+data.my2Nyyz_Nzyy
                    };
                    typename BoxData::Type temp1[3] = {
                        q[1]*data.my2Nxxy_Nyxx + q[2]*data.my2Nxxz_Nzxx,
                        q[2]*data.my2Nyyz_Nzyy + q[0]*data.my2Nyyx_Nxyy,
                        q[0]*data.my2Nzzx_Nxzz + q[1]*data.my2Nzzy_Nyzz
                    };
                    const typename BoxData::Type Omega_2 =
                        qlength_m4*(typename BoxData::Type(1.5)*dot(q, typename BoxData::Type(3)*data.myNijkDiag + UT_FixedVector<typename BoxData::Type,3>(temp0))
                            -typename BoxData::Type(7.5)*(dot(q3,data.myNijkDiag) + q[0]*q[1]*q[2]*data.mySumPermuteNxyz + dot(q2, UT_FixedVector<typename BoxData::Type,3>(temp1))));
                    Omega_approx += Omega_2;
                }
#endif
            }
#endif
            // If q is so small that we got NaNs and we just have a
            // small bounding box, it needs to descend.
            // (lanes already marked for descent are also excluded from the approximation)
            const v4uu mask = Omega_approx.isFinite() & ~descend_mask;
            Omega_approx = Omega_approx & mask;
            descend_bitmask = (~_mm_movemask_ps(V4SF(mask.vector))) & allchildbits;
            // Horizontal sum of the lanes we approximated.
            T sum = Omega_approx[0];
            for (int i = 1; i < BVH_N; ++i)
                sum += Omega_approx[i];
            *data_for_parent = sum;
            return descend_bitmask;
        }
        // Leaf case: exact signed solid angle of one triangle.
        void item(const int itemi, const int parent_nodei, T &data_for_parent) const
        {
            const UT_Vector3T<S> *const positions = myPositions;
            const int *const cur_triangle_points = myTrianglePoints + 3*itemi;
            const UT_Vector3T<T> a = positions[cur_triangle_points[0]];
            const UT_Vector3T<T> b = positions[cur_triangle_points[1]];
            const UT_Vector3T<T> c = positions[cur_triangle_points[2]];
            data_for_parent = UTsignedSolidAngleTri(a, b, c, myQueryPoint);
        }
        // Accumulate results of descended children only; the bits cleared in
        // descend_bits were already folded into *data_for_parent by pre().
        SYS_FORCE_INLINE void post(const int nodei, const int parent_nodei, T *data_for_parent, const int nchildren, const T *child_data_array, const uint descend_bits) const
        {
            T sum = (descend_bits&1) ? child_data_array[0] : 0;
            for (int i = 1; i < nchildren; ++i)
                sum += ((descend_bits>>i)&1) ? child_data_array[i] : 0;
            *data_for_parent += sum;
        }
    };
    const SolidAngleFunctors functors(myData.get(), query_point, accuracy_scale2, myOrder, myPositions, myTrianglePoints);
    T sum;
    myTree.traverseVector(functors, &sum);
    return sum;
}
/// Per-BVH-node precomputed data for the 2D subtended-angle approximation.
/// Each member holds one value per child (BVH_N lanes), packed into SIMD
/// vectors when BVH_N==4 and the scalar type is float.
template<typename T,typename S>
struct UT_SubtendedAngle<T,S>::BoxData
{
    void clear()
    {
        // Set everything to zero
        // (valid because all members are plain arithmetic/SIMD values)
        memset(this,0,sizeof(*this));
    }
    // Lane type for T-valued data: v4uf when SIMD applies, else a fixed vector.
    using Type  = typename std::conditional<BVH_N==4 && std::is_same<T,float>::value, v4uf, UT_FixedVector<T,BVH_N>>::type;
    // Lane type for S-valued (storage-precision) data.
    using SType = typename std::conditional<BVH_N==4 && std::is_same<S,float>::value, v4uf, UT_FixedVector<S,BVH_N>>::type;
    /// An upper bound on the squared distance from myAverageP to the farthest point in the box.
    SType myMaxPDist2;
    /// Centre of mass of the mesh surface in this box
    UT_FixedVector<Type,2> myAverageP;
    /// Unnormalized, area-weighted normal of the mesh in this box
    UT_FixedVector<Type,2> myN;
    /// Values for Omega_1
    /// @{
    UT_FixedVector<Type,2> myNijDiag;  // Nxx, Nyy
    Type myNxy_Nyx;                    // Nxy+Nyx
    /// @}
    /// Values for Omega_2
    /// @{
    UT_FixedVector<Type,2> myNijkDiag; // Nxxx, Nyyy
    Type my2Nxxy_Nyxx;                 // Nxxy+Nxyx+Nyxx = 2Nxxy+Nyxx
    Type my2Nyyx_Nxyy;                 // Nyyx+Nyxy+Nxyy = 2Nyyx+Nxyy
    /// @}
};
/// Constructs an empty structure: no BVH, no data, no mesh attached.
/// init() must be called before computeAngle() gives meaningful results.
template<typename T,typename S>
inline UT_SubtendedAngle<T,S>::UT_SubtendedAngle()
    : myTree()
    , myNBoxes(0)
    , myOrder(2)        // default Taylor-series order; init() overrides it
    , myData(nullptr)
    , myNSegments(0)
    , mySegmentPoints(nullptr)
    , myNPoints(0)
    , myPositions(nullptr)
{}
template<typename T,typename S>
inline UT_SubtendedAngle<T,S>::~UT_SubtendedAngle()
{
    // Default destruction works, but this needs to be outlined
    // to avoid having to include UT_BVHImpl.h in the header,
    // (for the UT_UniquePtr destructor.)
}
  6517. template<typename T,typename S>
  6518. inline void UT_SubtendedAngle<T,S>::init(
  6519. const int nsegments,
  6520. const int *const segment_points,
  6521. const int npoints,
  6522. const UT_Vector2T<S> *const positions,
  6523. const int order)
  6524. {
  6525. #if SOLID_ANGLE_DEBUG
  6526. UTdebugFormat("");
  6527. UTdebugFormat("");
  6528. UTdebugFormat("Building BVH for {} segments on {} points:", nsegments, npoints);
  6529. #endif
  6530. myOrder = order;
  6531. myNSegments = nsegments;
  6532. mySegmentPoints = segment_points;
  6533. myNPoints = npoints;
  6534. myPositions = positions;
  6535. #if SOLID_ANGLE_TIME_PRECOMPUTE
  6536. UT_StopWatch timer;
  6537. timer.start();
  6538. #endif
  6539. UT_SmallArray<UT::Box<S,2>> segment_boxes;
  6540. segment_boxes.setSizeNoInit(nsegments);
  6541. if (nsegments < 16*1024)
  6542. {
  6543. const int *cur_segment_points = segment_points;
  6544. for (int i = 0; i < nsegments; ++i, cur_segment_points += 2)
  6545. {
  6546. UT::Box<S,2> &box = segment_boxes[i];
  6547. box.initBounds(positions[cur_segment_points[0]]);
  6548. box.enlargeBounds(positions[cur_segment_points[1]]);
  6549. }
  6550. }
  6551. else
  6552. {
  6553. igl::parallel_for(nsegments,
  6554. [segment_points,&segment_boxes,positions](int i)
  6555. {
  6556. const int *cur_segment_points = segment_points + i*2;
  6557. UT::Box<S,2> &box = segment_boxes[i];
  6558. box.initBounds(positions[cur_segment_points[0]]);
  6559. box.enlargeBounds(positions[cur_segment_points[1]]);
  6560. });
  6561. }
  6562. #if SOLID_ANGLE_TIME_PRECOMPUTE
  6563. double time = timer.stop();
  6564. UTdebugFormat("{} s to create bounding boxes.", time);
  6565. timer.start();
  6566. #endif
  6567. myTree.template init<UT::BVH_Heuristic::BOX_AREA,S,2>(segment_boxes.array(), nsegments);
  6568. #if SOLID_ANGLE_TIME_PRECOMPUTE
  6569. time = timer.stop();
  6570. UTdebugFormat("{} s to initialize UT_BVH structure. {} nodes", time, myTree.getNumNodes());
  6571. #endif
  6572. //myTree.debugDump();
  6573. const int nnodes = myTree.getNumNodes();
  6574. myNBoxes = nnodes;
  6575. BoxData *box_data = new BoxData[nnodes];
  6576. myData.reset(box_data);
  6577. // Some data are only needed during initialization.
  6578. struct LocalData
  6579. {
  6580. // Bounding box
  6581. UT::Box<S,2> myBox;
  6582. // P and N are needed from each child for computing Nij.
  6583. UT_Vector2T<T> myAverageP;
  6584. UT_Vector2T<T> myLengthP;
  6585. UT_Vector2T<T> myN;
  6586. // Unsigned length is needed for computing the average position.
  6587. T myLength;
  6588. // These are needed for computing Nijk.
  6589. UT_Vector2T<T> myNijDiag;
  6590. T myNxy; T myNyx;
  6591. UT_Vector2T<T> myNijkDiag; // Nxxx, Nyyy
  6592. T my2Nxxy_Nyxx; // Nxxy+Nxyx+Nyxx = 2Nxxy+Nyxx
  6593. T my2Nyyx_Nxyy; // Nyyx+Nyxy+Nxyy = 2Nyyx+Nxyy
  6594. };
  6595. struct PrecomputeFunctors
  6596. {
  6597. BoxData *const myBoxData;
  6598. const UT::Box<S,2> *const mySegmentBoxes;
  6599. const int *const mySegmentPoints;
  6600. const UT_Vector2T<S> *const myPositions;
  6601. const int myOrder;
  6602. PrecomputeFunctors(
  6603. BoxData *box_data,
  6604. const UT::Box<S,2> *segment_boxes,
  6605. const int *segment_points,
  6606. const UT_Vector2T<S> *positions,
  6607. const int order)
  6608. : myBoxData(box_data)
  6609. , mySegmentBoxes(segment_boxes)
  6610. , mySegmentPoints(segment_points)
  6611. , myPositions(positions)
  6612. , myOrder(order)
  6613. {}
  6614. constexpr SYS_FORCE_INLINE bool pre(const int nodei, LocalData *data_for_parent) const
  6615. {
  6616. return true;
  6617. }
  6618. void item(const int itemi, const int parent_nodei, LocalData &data_for_parent) const
  6619. {
  6620. const UT_Vector2T<S> *const positions = myPositions;
  6621. const int *const cur_segment_points = mySegmentPoints + 2*itemi;
  6622. const UT_Vector2T<T> a = positions[cur_segment_points[0]];
  6623. const UT_Vector2T<T> b = positions[cur_segment_points[1]];
  6624. const UT_Vector2T<T> ab = b-a;
  6625. const UT::Box<S,2> &segment_box = mySegmentBoxes[itemi];
  6626. data_for_parent.myBox = segment_box;
  6627. // Length-weighted normal (unnormalized)
  6628. UT_Vector2T<T> N;
  6629. N[0] = ab[1];
  6630. N[1] = -ab[0];
  6631. const T length2 = ab.length2();
  6632. const T length = SYSsqrt(length2);
  6633. const UT_Vector2T<T> P = T(0.5)*(a+b);
  6634. data_for_parent.myAverageP = P;
  6635. data_for_parent.myLengthP = P*length;
  6636. data_for_parent.myN = N;
  6637. #if SOLID_ANGLE_DEBUG
  6638. UTdebugFormat("");
  6639. UTdebugFormat("Triangle {}: P = {}; N = {}; length = {}", itemi, P, N, length);
  6640. UTdebugFormat(" box = {}", data_for_parent.myBox);
  6641. #endif
  6642. data_for_parent.myLength = length;
  6643. const int order = myOrder;
  6644. if (order < 1)
  6645. return;
  6646. // NOTE: Due to P being at the centroid, segments have Nij = 0
  6647. // contributions to Nij.
  6648. data_for_parent.myNijDiag = T(0);
  6649. data_for_parent.myNxy = 0; data_for_parent.myNyx = 0;
  6650. if (order < 2)
  6651. return;
  6652. // If it's zero-length, the results are zero, so we can skip.
  6653. if (length == 0)
  6654. {
  6655. data_for_parent.myNijkDiag = T(0);
  6656. data_for_parent.my2Nxxy_Nyxx = 0;
  6657. data_for_parent.my2Nyyx_Nxyy = 0;
  6658. return;
  6659. }
  6660. T integral_xx = ab[0]*ab[0]/T(12);
  6661. T integral_xy = ab[0]*ab[1]/T(12);
  6662. T integral_yy = ab[1]*ab[1]/T(12);
  6663. data_for_parent.myNijkDiag[0] = integral_xx*N[0];
  6664. data_for_parent.myNijkDiag[1] = integral_yy*N[1];
  6665. T Nxxy = N[0]*integral_xy;
  6666. T Nyxx = N[1]*integral_xx;
  6667. T Nyyx = N[1]*integral_xy;
  6668. T Nxyy = N[0]*integral_yy;
  6669. data_for_parent.my2Nxxy_Nyxx = 2*Nxxy + Nyxx;
  6670. data_for_parent.my2Nyyx_Nxyy = 2*Nyyx + Nxyy;
  6671. #if SOLID_ANGLE_DEBUG
  6672. UTdebugFormat(" integral_xx = {}; yy = {}", integral_xx, integral_yy);
  6673. UTdebugFormat(" integral_xy = {}", integral_xy);
  6674. #endif
  6675. }
  6676. void post(const int nodei, const int parent_nodei, LocalData *data_for_parent, const int nchildren, const LocalData *child_data_array) const
  6677. {
  6678. // NOTE: Although in the general case, data_for_parent may be null for the root call,
  6679. // this functor assumes that it's non-null, so the call below must pass a non-null pointer.
  6680. BoxData &current_box_data = myBoxData[nodei];
  6681. UT_Vector2T<T> N = child_data_array[0].myN;
  6682. ((T*)&current_box_data.myN[0])[0] = N[0];
  6683. ((T*)&current_box_data.myN[1])[0] = N[1];
  6684. UT_Vector2T<T> lengthP = child_data_array[0].myLengthP;
  6685. T length = child_data_array[0].myLength;
  6686. const UT_Vector2T<T> local_P = child_data_array[0].myAverageP;
  6687. ((T*)&current_box_data.myAverageP[0])[0] = local_P[0];
  6688. ((T*)&current_box_data.myAverageP[1])[0] = local_P[1];
  6689. for (int i = 1; i < nchildren; ++i)
  6690. {
  6691. const UT_Vector2T<T> local_N = child_data_array[i].myN;
  6692. N += local_N;
  6693. ((T*)&current_box_data.myN[0])[i] = local_N[0];
  6694. ((T*)&current_box_data.myN[1])[i] = local_N[1];
  6695. lengthP += child_data_array[i].myLengthP;
  6696. length += child_data_array[i].myLength;
  6697. const UT_Vector2T<T> local_P = child_data_array[i].myAverageP;
  6698. ((T*)&current_box_data.myAverageP[0])[i] = local_P[0];
  6699. ((T*)&current_box_data.myAverageP[1])[i] = local_P[1];
  6700. }
  6701. for (int i = nchildren; i < BVH_N; ++i)
  6702. {
  6703. // Set to zero, just to avoid false positives for uses of uninitialized memory.
  6704. ((T*)&current_box_data.myN[0])[i] = 0;
  6705. ((T*)&current_box_data.myN[1])[i] = 0;
  6706. ((T*)&current_box_data.myAverageP[0])[i] = 0;
  6707. ((T*)&current_box_data.myAverageP[1])[i] = 0;
  6708. }
  6709. data_for_parent->myN = N;
  6710. data_for_parent->myLengthP = lengthP;
  6711. data_for_parent->myLength = length;
  6712. UT::Box<S,2> box(child_data_array[0].myBox);
  6713. for (int i = 1; i < nchildren; ++i)
  6714. box.combine(child_data_array[i].myBox);
  6715. // Normalize P
  6716. UT_Vector2T<T> averageP;
  6717. if (length > 0)
  6718. averageP = lengthP/length;
  6719. else
  6720. averageP = T(0.5)*(box.getMin() + box.getMax());
  6721. data_for_parent->myAverageP = averageP;
  6722. data_for_parent->myBox = box;
  6723. for (int i = 0; i < nchildren; ++i)
  6724. {
  6725. const UT::Box<S,2> &local_box(child_data_array[i].myBox);
  6726. const UT_Vector2T<T> &local_P = child_data_array[i].myAverageP;
  6727. const UT_Vector2T<T> maxPDiff = SYSmax(local_P-UT_Vector2T<T>(local_box.getMin()), UT_Vector2T<T>(local_box.getMax())-local_P);
  6728. ((T*)&current_box_data.myMaxPDist2)[i] = maxPDiff.length2();
  6729. }
  6730. for (int i = nchildren; i < BVH_N; ++i)
  6731. {
  6732. // This child is non-existent. If we set myMaxPDist2 to infinity, it will never
  6733. // use the approximation, and the traverseVector function can check for EMPTY.
  6734. ((T*)&current_box_data.myMaxPDist2)[i] = std::numeric_limits<T>::infinity();
  6735. }
  6736. const int order = myOrder;
  6737. if (order >= 1)
  6738. {
  6739. // We now have the current box's P, so we can adjust Nij and Nijk
  6740. data_for_parent->myNijDiag = child_data_array[0].myNijDiag;
  6741. data_for_parent->myNxy = 0;
  6742. data_for_parent->myNyx = 0;
  6743. data_for_parent->myNijkDiag = child_data_array[0].myNijkDiag;
  6744. data_for_parent->my2Nxxy_Nyxx = child_data_array[0].my2Nxxy_Nyxx;
  6745. data_for_parent->my2Nyyx_Nxyy = child_data_array[0].my2Nyyx_Nxyy;
  6746. for (int i = 1; i < nchildren; ++i)
  6747. {
  6748. data_for_parent->myNijDiag += child_data_array[i].myNijDiag;
  6749. data_for_parent->myNijkDiag += child_data_array[i].myNijkDiag;
  6750. data_for_parent->my2Nxxy_Nyxx += child_data_array[i].my2Nxxy_Nyxx;
  6751. data_for_parent->my2Nyyx_Nxyy += child_data_array[i].my2Nyyx_Nxyy;
  6752. }
  6753. for (int j = 0; j < 2; ++j)
  6754. ((T*)&current_box_data.myNijDiag[j])[0] = child_data_array[0].myNijDiag[j];
  6755. ((T*)&current_box_data.myNxy_Nyx)[0] = child_data_array[0].myNxy + child_data_array[0].myNyx;
  6756. for (int j = 0; j < 2; ++j)
  6757. ((T*)&current_box_data.myNijkDiag[j])[0] = child_data_array[0].myNijkDiag[j];
  6758. ((T*)&current_box_data.my2Nxxy_Nyxx)[0] = child_data_array[0].my2Nxxy_Nyxx;
  6759. ((T*)&current_box_data.my2Nyyx_Nxyy)[0] = child_data_array[0].my2Nyyx_Nxyy;
  6760. for (int i = 1; i < nchildren; ++i)
  6761. {
  6762. for (int j = 0; j < 2; ++j)
  6763. ((T*)&current_box_data.myNijDiag[j])[i] = child_data_array[i].myNijDiag[j];
  6764. ((T*)&current_box_data.myNxy_Nyx)[i] = child_data_array[i].myNxy + child_data_array[i].myNyx;
  6765. for (int j = 0; j < 2; ++j)
  6766. ((T*)&current_box_data.myNijkDiag[j])[i] = child_data_array[i].myNijkDiag[j];
  6767. ((T*)&current_box_data.my2Nxxy_Nyxx)[i] = child_data_array[i].my2Nxxy_Nyxx;
  6768. ((T*)&current_box_data.my2Nyyx_Nxyy)[i] = child_data_array[i].my2Nyyx_Nxyy;
  6769. }
  6770. for (int i = nchildren; i < BVH_N; ++i)
  6771. {
  6772. // Set to zero, just to avoid false positives for uses of uninitialized memory.
  6773. for (int j = 0; j < 2; ++j)
  6774. ((T*)&current_box_data.myNijDiag[j])[i] = 0;
  6775. ((T*)&current_box_data.myNxy_Nyx)[i] = 0;
  6776. for (int j = 0; j < 2; ++j)
  6777. ((T*)&current_box_data.myNijkDiag[j])[i] = 0;
  6778. ((T*)&current_box_data.my2Nxxy_Nyxx)[i] = 0;
  6779. ((T*)&current_box_data.my2Nyyx_Nxyy)[i] = 0;
  6780. }
  6781. for (int i = 0; i < nchildren; ++i)
  6782. {
  6783. const LocalData &child_data = child_data_array[i];
  6784. UT_Vector2T<T> displacement = child_data.myAverageP - UT_Vector2T<T>(data_for_parent->myAverageP);
  6785. UT_Vector2T<T> N = child_data.myN;
  6786. // Adjust Nij for the change in centre P
  6787. data_for_parent->myNijDiag += N*displacement;
  6788. T Nxy = child_data.myNxy + N[0]*displacement[1];
  6789. T Nyx = child_data.myNyx + N[1]*displacement[0];
  6790. data_for_parent->myNxy += Nxy;
  6791. data_for_parent->myNyx += Nyx;
  6792. if (order >= 2)
  6793. {
  6794. // Adjust Nijk for the change in centre P
  6795. data_for_parent->myNijkDiag += T(2)*displacement*child_data.myNijDiag + displacement*displacement*child_data.myN;
  6796. data_for_parent->my2Nxxy_Nyxx +=
  6797. 2*(displacement[1]*child_data.myNijDiag[0] + displacement[0]*child_data.myNxy + N[0]*displacement[0]*displacement[1])
  6798. + 2*child_data.myNyx*displacement[0] + N[1]*displacement[0]*displacement[0];
  6799. data_for_parent->my2Nyyx_Nxyy +=
  6800. 2*(displacement[0]*child_data.myNijDiag[1] + displacement[1]*child_data.myNyx + N[1]*displacement[1]*displacement[0])
  6801. + 2*child_data.myNxy*displacement[1] + N[0]*displacement[1]*displacement[1];
  6802. }
  6803. }
  6804. }
  6805. #if SOLID_ANGLE_DEBUG
  6806. UTdebugFormat("");
  6807. UTdebugFormat("Node {}: nchildren = {}; maxP = {}", nodei, nchildren, SYSsqrt(current_box_data.myMaxPDist2));
  6808. UTdebugFormat(" P = {}; N = {}", current_box_data.myAverageP, current_box_data.myN);
  6809. UTdebugFormat(" Nii = {}", current_box_data.myNijDiag);
  6810. UTdebugFormat(" Nxy+Nyx = {}", current_box_data.myNxy_Nyx);
  6811. UTdebugFormat(" Niii = {}", current_box_data.myNijkDiag);
  6812. UTdebugFormat(" 2Nxxy+Nyxx = {}; 2Nyyx+Nxyy = {}", current_box_data.my2Nxxy_Nyxx, current_box_data.my2Nyyx_Nxyy);
  6813. #endif
  6814. }
  6815. };
  6816. #if SOLID_ANGLE_TIME_PRECOMPUTE
  6817. timer.start();
  6818. #endif
  6819. const PrecomputeFunctors functors(box_data, segment_boxes.array(), segment_points, positions, order);
  6820. // NOTE: post-functor relies on non-null data_for_parent, so we have to pass one.
  6821. LocalData local_data;
  6822. myTree.template traverseParallel<LocalData>(4096, functors, &local_data);
  6823. //myTree.template traverse<LocalData>(functors);
  6824. #if SOLID_ANGLE_TIME_PRECOMPUTE
  6825. time = timer.stop();
  6826. UTdebugFormat("{} s to precompute coefficients.", time);
  6827. #endif
  6828. }
  6829. template<typename T,typename S>
  6830. inline void UT_SubtendedAngle<T, S>::clear()
  6831. {
  6832. myTree.clear();
  6833. myNBoxes = 0;
  6834. myOrder = 2;
  6835. myData.reset();
  6836. myNSegments = 0;
  6837. mySegmentPoints = nullptr;
  6838. myNPoints = 0;
  6839. myPositions = nullptr;
  6840. }
template<typename T,typename S>
inline T UT_SubtendedAngle<T, S>::computeAngle(const UT_Vector2T<T> &query_point, const T accuracy_scale) const
{
    // The descend test below compares squared distances, so square the scale once up front.
    const T accuracy_scale2 = accuracy_scale*accuracy_scale;
    // Traversal functors for the BVH (2D analogue of the solid-angle traversal):
    //  - pre():  per internal node, evaluates the far-field series approximation for all
    //            BVH_N children at once (SIMD lanes) and returns a bitmask of children
    //            that still need exact descent.
    //  - item(): per leaf segment, computes the exact signed subtended angle.
    //  - post(): sums child contributions into the parent's accumulator.
    struct AngleFunctors
    {
        const BoxData *const myBoxData;          // per-node precomputed coefficients
        const UT_Vector2T<T> myQueryPoint;       // query position (copied by value)
        const T myAccuracyScale2;                // squared accuracy threshold factor
        const UT_Vector2T<S> *const myPositions; // point positions (not owned)
        const int *const mySegmentPoints;        // 2 point indices per segment (not owned)
        const int myOrder;                       // Taylor-series order actually requested (0..2)
        AngleFunctors(
            const BoxData *const box_data,
            const UT_Vector2T<T> &query_point,
            const T accuracy_scale2,
            const int order,
            const UT_Vector2T<S> *const positions,
            const int *const segment_points)
            : myBoxData(box_data)
            , myQueryPoint(query_point)
            , myAccuracyScale2(accuracy_scale2)
            , myOrder(order)
            , myPositions(positions)
            , mySegmentPoints(segment_points)
        {}
        // Returns a bitmask of children that must be descended into; children whose bit
        // is clear have already been approximated and accumulated into *data_for_parent.
        uint pre(const int nodei, T *data_for_parent) const
        {
            const BoxData &data = myBoxData[nodei];
            const typename BoxData::Type maxP2 = data.myMaxPDist2;
            // q = query point relative to each child's average P, one child per SIMD lane.
            UT_FixedVector<typename BoxData::Type,2> q;
            q[0] = typename BoxData::Type(myQueryPoint[0]);
            q[1] = typename BoxData::Type(myQueryPoint[1]);
            q -= data.myAverageP;
            const typename BoxData::Type qlength2 = q[0]*q[0] + q[1]*q[1];
            // If the query point is within a factor of accuracy_scale of the box radius,
            // it's assumed to be not a good enough approximation, so it needs to descend.
            // TODO: Is there a way to estimate the error?
            static_assert((std::is_same<typename BoxData::Type,v4uf>::value), "FIXME: Implement support for other tuple types!");
            v4uu descend_mask = (qlength2 <= maxP2*myAccuracyScale2);
            // One bit per child lane; bit set => that child needs exact descent.
            uint descend_bitmask = _mm_movemask_ps(V4SF(descend_mask.vector));
            constexpr uint allchildbits = ((uint(1)<<BVH_N)-1);
            // Fast path: every child needs descending, so there's nothing to approximate here.
            if (descend_bitmask == allchildbits)
            {
                *data_for_parent = 0;
                return allchildbits;
            }
            // qlength2 must be non-zero, since it's strictly greater than something.
            // We still need to be careful for NaNs, though, because the 4th power might cause problems.
            const typename BoxData::Type qlength_m2 = typename BoxData::Type(1.0)/qlength2;
            const typename BoxData::Type qlength_m1 = sqrt(qlength_m2);
            // Normalize q to reduce issues with overflow/underflow, since we'd need the 6th power
            // if we didn't normalize, and (1e-7)^-6 = 1e42, which overflows single-precision.
            q *= qlength_m1;
            // Zeroth-order (dipole) term of the subtended-angle expansion.
            typename BoxData::Type Omega_approx = -qlength_m1*dot(q,data.myN);
            const int order = myOrder;
            if (order >= 1)
            {
                // First-order correction term (Omega_1), built from the Nij moments.
                const UT_FixedVector<typename BoxData::Type,2> q2 = q*q;
                const typename BoxData::Type Omega_1 =
                    qlength_m2*(data.myNijDiag[0] + data.myNijDiag[1]
                        -typename BoxData::Type(2.0)*(dot(q2,data.myNijDiag) +
                            q[0]*q[1]*data.myNxy_Nyx));
                Omega_approx += Omega_1;
                if (order >= 2)
                {
                    // Second-order correction term (Omega_2), built from the Nijk moments.
                    const UT_FixedVector<typename BoxData::Type,2> q3 = q2*q;
                    const typename BoxData::Type qlength_m3 = qlength_m2*qlength_m1;
                    typename BoxData::Type temp0[2] = {
                        data.my2Nyyx_Nxyy,
                        data.my2Nxxy_Nyxx
                    };
                    typename BoxData::Type temp1[2] = {
                        q[1]*data.my2Nxxy_Nyxx,
                        q[0]*data.my2Nyyx_Nxyy
                    };
                    const typename BoxData::Type Omega_2 =
                        qlength_m3*(dot(q, typename BoxData::Type(3)*data.myNijkDiag + UT_FixedVector<typename BoxData::Type,2>(temp0))
                            -typename BoxData::Type(4.0)*(dot(q3,data.myNijkDiag) + dot(q2, UT_FixedVector<typename BoxData::Type,2>(temp1))));
                    Omega_approx += Omega_2;
                }
            }
            // If q is so small that we got NaNs and we just have a
            // small bounding box, it needs to descend.
            // (lanes already marked for descent are also excluded from the approximation)
            const v4uu mask = Omega_approx.isFinite() & ~descend_mask;
            Omega_approx = Omega_approx & mask;
            descend_bitmask = (~_mm_movemask_ps(V4SF(mask.vector))) & allchildbits;
            // Horizontal sum of the lanes we approximated.
            T sum = Omega_approx[0];
            for (int i = 1; i < BVH_N; ++i)
                sum += Omega_approx[i];
            *data_for_parent = sum;
            return descend_bitmask;
        }
        // Leaf case: exact signed angle subtended by one segment.
        void item(const int itemi, const int parent_nodei, T &data_for_parent) const
        {
            const UT_Vector2T<S> *const positions = myPositions;
            const int *const cur_segment_points = mySegmentPoints + 2*itemi;
            const UT_Vector2T<T> a = positions[cur_segment_points[0]];
            const UT_Vector2T<T> b = positions[cur_segment_points[1]];
            data_for_parent = UTsignedAngleSegment(a, b, myQueryPoint);
        }
        // Accumulate results of descended children only; the bits cleared in
        // descend_bits were already folded into *data_for_parent by pre().
        SYS_FORCE_INLINE void post(const int nodei, const int parent_nodei, T *data_for_parent, const int nchildren, const T *child_data_array, const uint descend_bits) const
        {
            T sum = (descend_bits&1) ? child_data_array[0] : 0;
            for (int i = 1; i < nchildren; ++i)
                sum += ((descend_bits>>i)&1) ? child_data_array[i] : 0;
            *data_for_parent += sum;
        }
    };
    const AngleFunctors functors(myData.get(), query_point, accuracy_scale2, myOrder, myPositions, mySegmentPoints);
    T sum;
    myTree.traverseVector(functors, &sum);
    return sum;
}
  6955. // Instantiate our templates.
  6956. //template class UT_SolidAngle<fpreal32,fpreal32>;
  6957. // FIXME: The SIMD parts will need to be handled differently in order to support fpreal64.
  6958. //template class UT_SolidAngle<fpreal64,fpreal32>;
  6959. //template class UT_SolidAngle<fpreal64,fpreal64>;
  6960. //template class UT_SubtendedAngle<fpreal32,fpreal32>;
  6961. //template class UT_SubtendedAngle<fpreal64,fpreal32>;
  6962. //template class UT_SubtendedAngle<fpreal64,fpreal64>;
  6963. } // End HDK_Sample namespace
  6964. }}