  1. //-------------------------------------------------------------------------------------
  2. // DirectXMeshOptimize.cpp
  3. //
  4. // DirectX Mesh Geometry Library - Mesh optimization
  5. //
  6. // THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
  7. // ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
  8. // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
  9. // PARTICULAR PURPOSE.
  10. //
  11. // Copyright (c) Microsoft Corporation. All rights reserved.
  12. //
  13. // http://go.microsoft.com/fwlink/?LinkID=324981
  14. //-------------------------------------------------------------------------------------
  15. namespace DirectX
  16. {
  17. #define UNUSED32 (0xffffffff)
  18. #undef UINT32_MAX
  19. #define UINT32_MAX UINT_MAX
  20. #pragma warning(disable:4267)
// Tuning constants for the vertex-cache face optimizer.
enum OPTFACES
{
    OPTFACES_V_DEFAULT = 12,   // Default vertex cache size which is considered 'device independent'
    OPTFACES_R_DEFAULT = 7,    // Default restart threshold which is considered 'device independent'

    OPTFACES_V_STRIPORDER = 0, // Indicates no vertex cache optimization, only reordering into strips
};
  29. template<class index_t>
  30. static inline uint32_t find_edge( _In_reads_(3) const index_t* indices, index_t search )
  31. {
  32. assert( indices != 0 );
  33. uint32_t edge = 0;
  34. for( ; edge < 3; ++edge )
  35. {
  36. if ( indices[ edge ] == search )
  37. break;
  38. }
  39. return edge;
  40. }
  41. //=====================================================================================
  42. // Attribute Utilities
  43. //=====================================================================================
  44. _Use_decl_annotations_
  45. static void ComputeSubsets( std__vector< std__pair<size_t,size_t> > &subsets, const uint32_t* attributes, size_t nFaces )
  46. {
  47. subsets.clear();
  48. if ( !nFaces )return;
  49. if ( !attributes )
  50. {
  51. subsets.emplace_back( std__pair<size_t,size_t>( 0, nFaces ) );
  52. return;
  53. }
  54. uint32_t lastAttr = attributes[ 0 ];
  55. size_t offset = 0;
  56. size_t count = 1;
  57. for( size_t j = 1; j < nFaces; ++j )
  58. {
  59. if ( attributes[ j ] != lastAttr )
  60. {
  61. subsets.emplace_back( std__pair<size_t,size_t>( offset, count ) );
  62. lastAttr = attributes[ j ];
  63. offset = j;
  64. count = 1;
  65. }
  66. else
  67. {
  68. count += 1;
  69. }
  70. }
  71. if ( count > 0 )subsets.emplace_back( std__pair<size_t,size_t>( offset, count ) );
  72. }
  73. //-------------------------------------------------------------------------------------
  74. #pragma warning(push)
  75. #pragma warning(disable:6101)
  76. template<class index_t>
  77. static Bool ReorderFaces( _In_reads_(nFaces*3) const index_t* ibin, _In_ size_t nFaces,
  78. _In_reads_opt_(nFaces*3) const uint32_t* adjin,
  79. _In_reads_(nFaces) const uint32_t* faceRemap,
  80. _Out_writes_(nFaces*3) index_t* ibout,
  81. _Out_writes_opt_(nFaces*3) uint32_t* adjout )
  82. {
  83. assert( ibin != 0 && faceRemap != 0 && ibout != 0 && ibin != ibout );
  84. _Analysis_assume_( ibin != 0 && faceRemap != 0 && ibout != 0 && ibin != ibout );
  85. assert( ( !adjin && !adjout ) || ( (adjin && adjout) && adjin != adjout ) );
  86. _Analysis_assume_( ( !adjin && !adjout ) || ( (adjin && adjout) && adjin != adjout ) );
  87. for( size_t j = 0; j < nFaces; ++j )
  88. {
  89. uint32_t src = faceRemap[ j ];
  90. if ( src == UNUSED32 )
  91. continue;
  92. if ( src < nFaces )
  93. {
  94. ibout[ j*3 ] = ibin[ src*3 ];
  95. ibout[ j*3 + 1 ] = ibin[ src*3 + 1 ];
  96. ibout[ j*3 + 2 ] = ibin[ src*3 + 2 ];
  97. if ( adjin && adjout )
  98. {
  99. adjout[ j*3 ] = adjin[ src*3 ];
  100. adjout[ j*3 + 1 ] = adjin[ src*3 + 1 ];
  101. adjout[ j*3 + 2 ] = adjin[ src*3 + 2 ];
  102. }
  103. }
  104. else
  105. return false;
  106. }
  107. return true;
  108. }
  109. #pragma warning(pop)
  110. //-------------------------------------------------------------------------------------
  111. template<class index_t>
  112. static Bool SwapFaces( _Inout_updates_all_(nFaces*3) index_t* ib, _In_ size_t nFaces,
  113. _Inout_updates_all_opt_(nFaces*3) uint32_t* adj,
  114. _In_reads_(nFaces) const uint32_t* faceRemap)
  115. {
  116. assert( ib != 0 && faceRemap != 0 );
  117. _Analysis_assume_( ib != 0 && faceRemap != 0 );
  118. std__unique_ptr<uint8_t> temp( ( sizeof(bool) + sizeof(uint32_t) ) * nFaces );
  119. if ( !temp )
  120. return false;
  121. uint32_t *faceRemapInverse = reinterpret_cast<uint32_t*>( temp.get() );
  122. for( uint32_t j = 0; j < nFaces; ++j )
  123. {
  124. faceRemapInverse[ faceRemap[ j ] ] = j;
  125. }
  126. bool *moved = reinterpret_cast<bool*>( temp.get() + sizeof(uint32_t) * nFaces );
  127. memset( moved, 0, sizeof(bool) * nFaces );
  128. for( size_t j = 0; j < nFaces; ++j )
  129. {
  130. if ( moved[ j ] )
  131. continue;
  132. uint32_t dest = faceRemapInverse[ j ];
  133. if ( dest == UNUSED32 )
  134. continue;
  135. if ( dest >= nFaces )
  136. return false;
  137. while( dest != j )
  138. {
  139. // Swap face
  140. index_t i0 = ib[ dest*3 ];
  141. index_t i1 = ib[ dest*3 + 1 ];
  142. index_t i2 = ib[ dest*3 + 2 ];
  143. ib[ dest*3 ] = ib[ j*3 ];
  144. ib[ dest*3 + 1 ] = ib[ j*3 + 1 ];
  145. ib[ dest*3 + 2 ] = ib[ j*3 + 2 ];
  146. ib[ j*3 ] = i0;
  147. ib[ j*3 + 1 ] = i1;
  148. ib[ j*3 + 2 ] = i2;
  149. if ( adj )
  150. {
  151. uint32_t a0 = adj[ dest*3 ];
  152. uint32_t a1 = adj[ dest*3 + 1 ];
  153. uint32_t a2 = adj[ dest*3 + 2 ];
  154. adj[ dest*3 ] = adj[ j*3 ];
  155. adj[ dest*3 + 1 ] = adj[ j*3 + 1 ];
  156. adj[ dest*3 + 2 ] = adj[ j*3 + 2 ];
  157. adj[ j*3 ] = a0;
  158. adj[ j*3 + 1 ] = a1;
  159. adj[ j*3 + 2 ] = a2;
  160. }
  161. moved[ dest ] = true;
  162. dest = faceRemapInverse[ dest ];
  163. if ( dest == UNUSED32 || moved[dest] )
  164. break;
  165. if ( dest >= nFaces )
  166. return false;
  167. }
  168. }
  169. return true;
  170. }
//-------------------------------------------------------------------------------------
// In-place vertex permutation: moves vertex data (and optional point-reps) so that
// vertex j ends up at slot vertexRemap[j], walking permutation cycles with a single
// vertex-sized scratch buffer. Returns false on bad arguments, allocation failure,
// or an out-of-range remap entry.
static Bool SwapVertices( _Inout_updates_bytes_all_(nVerts*stride) void* vb, size_t stride, size_t nVerts,
_Inout_updates_all_opt_(nVerts) uint32_t* pointRep, _In_reads_(nVerts) const uint32_t* vertexRemap )
{
    if ( !vb || !stride || !nVerts || !vertexRemap )
        return false;

    // Single allocation: nVerts 'moved' flags followed by one vertex of scratch.
    std__unique_ptr<uint8_t> temp( nVerts + stride );
    if ( !temp )
        return false;

    uint8_t *moved = temp.get();
    memset( moved, 0, nVerts );

    uint8_t *vbtemp = temp.get() + nVerts;

    uint8_t *ptr = reinterpret_cast<uint8_t*>( vb );

    for( size_t j = 0; j < nVerts; ++j )
    {
        if ( moved[j] )
            continue;

        uint32_t dest = vertexRemap[ j ];

        if ( dest == UNUSED32 )
            continue;

        if ( dest >= nVerts )
            return false;

        bool next = false;

        // Follow the permutation cycle that starts at vertex j.
        while( dest != j )
        {
            // Swap vertex
#pragma prefast(push)
#pragma prefast(disable : 26019, "PREfast noise: Esp:1307")
            memcpy( vbtemp, ptr + dest*stride, stride );
            memcpy( ptr + dest*stride, ptr + j*stride, stride );
            memcpy( ptr + j*stride, vbtemp, stride );
#pragma prefast(pop)

            if ( pointRep )
            {
                Swap( pointRep[ dest ], pointRep[ j ] );

                // Remap the representative now that 'dest' holds its final vertex.
                uint32_t pr = pointRep[ dest ];
                if ( pr < nVerts )
                {
                    pointRep[ dest ] = vertexRemap[ pr ];
                }
            }

            moved[ dest ] = true;

            dest = vertexRemap[ dest ];

            if ( dest == UNUSED32 || moved[dest] )
            {
                // Cycle closed; vertex j itself still needs its point-rep remapped,
                // which the swap inside the loop already handled — skip the tail fixup.
                next = true;
                break;
            }

            if ( dest >= nVerts )
                return false;
        }

        if ( next )
            continue;

        if ( pointRep )
        {
            // Remap the representative of a vertex that never moved (dest == j
            // immediately, so the in-loop remap never ran for slot j).
            uint32_t pr = pointRep[ j ];
            if ( pr < nVerts )
            {
                pointRep[ j ] = vertexRemap[ pr ];
            }
        }
    }

    return true;
}
  237. //-------------------------------------------------------------------------------------
  238. template<class index_t>
  239. static Bool _FinalizeIB( _In_reads_(nFaces*3) const index_t* ibin, size_t nFaces,
  240. _In_reads_(nVerts) const uint32_t* vertexRemap, size_t nVerts,
  241. _Out_writes_(nFaces*3) index_t* ibout )
  242. {
  243. if ( !ibin || !nFaces || !vertexRemap || !nVerts || !ibout )
  244. return false;
  245. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  246. return false;
  247. if ( nVerts >= index_t(-1) )
  248. return false;
  249. for( size_t j = 0; j < ( nFaces * 3 ); ++j )
  250. {
  251. index_t i = ibin[ j ];
  252. if ( i == index_t(-1) )
  253. {
  254. ibout[ j ] = index_t(-1);
  255. continue;
  256. }
  257. if ( i >= nVerts )
  258. return false;
  259. uint32_t dest = vertexRemap[ i ];
  260. if ( dest == UNUSED32 )
  261. {
  262. ibout[ j ] = i;
  263. continue;
  264. }
  265. if ( dest < nVerts )
  266. {
  267. ibout[ j ] = index_t( dest );
  268. }
  269. else
  270. return false;
  271. }
  272. return true;
  273. }
  274. //-------------------------------------------------------------------------------------
  275. template<class index_t>
  276. static Bool _FinalizeIB( _In_reads_(nFaces*3) index_t* ib, size_t nFaces, _In_reads_(nVerts) const uint32_t* vertexRemap, size_t nVerts )
  277. {
  278. if ( !ib || !nFaces || !vertexRemap || !nVerts )
  279. return false;
  280. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  281. return false;
  282. if ( nVerts >= index_t(-1) )
  283. return false;
  284. for( size_t j = 0; j < ( nFaces * 3 ); ++j )
  285. {
  286. index_t i = ib[ j ];
  287. if( i == index_t(-1) )
  288. continue;
  289. if ( i >= nVerts )
  290. return false;
  291. uint32_t dest = vertexRemap[ i ];
  292. if ( dest == UNUSED32 )
  293. continue;
  294. if ( dest < nVerts )
  295. {
  296. ib[ j ] = index_t( dest );
  297. }
  298. else
  299. return false;
  300. }
  301. return true;
  302. }
  303. //-------------------------------------------------------------------------------------
  304. // Applies a face remap reordering to an index buffer
  305. //-------------------------------------------------------------------------------------
  306. _Use_decl_annotations_
  307. static Bool ReorderIB( const uint16_t* ibin, size_t nFaces, const uint32_t* faceRemap, uint16_t* ibout )
  308. {
  309. if ( !ibin || !nFaces || !faceRemap || !ibout )
  310. return false;
  311. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  312. return false;
  313. if ( ibin == ibout )
  314. return false;
  315. return ReorderFaces<uint16_t>( ibin, nFaces, nullptr, faceRemap, ibout, nullptr );
  316. }
  317. _Use_decl_annotations_
  318. static Bool ReorderIB( uint16_t* ib, size_t nFaces, const uint32_t* faceRemap )
  319. {
  320. if ( !ib || !nFaces || !faceRemap )
  321. return false;
  322. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  323. return false;
  324. return SwapFaces<uint16_t>( ib, nFaces, nullptr, faceRemap );
  325. }
  326. //-------------------------------------------------------------------------------------
  327. _Use_decl_annotations_
  328. static Bool ReorderIB( const uint32_t* ibin, size_t nFaces, const uint32_t* faceRemap, uint32_t* ibout )
  329. {
  330. if ( !ibin || !nFaces || !faceRemap || !ibout )
  331. return false;
  332. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  333. return false;
  334. if ( ibin == ibout )
  335. return false;
  336. return ReorderFaces<uint32_t>( ibin, nFaces, nullptr, faceRemap, ibout, nullptr );
  337. }
  338. _Use_decl_annotations_
  339. static Bool ReorderIB( uint32_t* ib, size_t nFaces, const uint32_t* faceRemap )
  340. {
  341. if ( !ib || !nFaces || !faceRemap )
  342. return false;
  343. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  344. return false;
  345. return SwapFaces<uint32_t>( ib, nFaces, nullptr, faceRemap );
  346. }
  347. //-------------------------------------------------------------------------------------
  348. // Applies a face remap reordering to an index buffer and adjacency
  349. //-------------------------------------------------------------------------------------
  350. _Use_decl_annotations_
  351. static Bool ReorderIBAndAdjacency( const uint16_t* ibin, size_t nFaces, const uint32_t* adjin, const uint32_t* faceRemap,
  352. uint16_t* ibout, uint32_t* adjout )
  353. {
  354. if ( !ibin || !nFaces || !adjin || !faceRemap || !ibout || !adjout )
  355. return false;
  356. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  357. return false;
  358. if ( ( ibin == ibout ) || ( adjin == adjout ) )
  359. return false;
  360. return ReorderFaces<uint16_t>( ibin, nFaces, adjin, faceRemap, ibout, adjout );
  361. }
  362. _Use_decl_annotations_
  363. static Bool ReorderIBAndAdjacency( uint16_t* ib, size_t nFaces, uint32_t* adj, const uint32_t* faceRemap )
  364. {
  365. if ( !ib || !nFaces || !adj || !faceRemap )
  366. return false;
  367. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  368. return false;
  369. return SwapFaces<uint16_t>( ib, nFaces, adj, faceRemap );
  370. }
  371. //-------------------------------------------------------------------------------------
  372. _Use_decl_annotations_
  373. static Bool ReorderIBAndAdjacency( const uint32_t* ibin, size_t nFaces, const uint32_t* adjin, const uint32_t* faceRemap,
  374. uint32_t* ibout, uint32_t* adjout )
  375. {
  376. if ( !ibin || !nFaces || !adjin || !faceRemap || !ibout || !adjout )
  377. return false;
  378. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  379. return false;
  380. if ( ( ibin == ibout ) || ( adjin == adjout ) )
  381. return false;
  382. return ReorderFaces<uint32_t>( ibin, nFaces, adjin, faceRemap, ibout, adjout );
  383. }
  384. _Use_decl_annotations_
  385. static Bool ReorderIBAndAdjacency( uint32_t* ib, size_t nFaces, uint32_t* adj, const uint32_t* faceRemap )
  386. {
  387. if ( !ib || !nFaces || !adj || !faceRemap )
  388. return false;
  389. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  390. return false;
  391. return SwapFaces<uint32_t>( ib, nFaces, adj, faceRemap );
  392. }
//-------------------------------------------------------------------------------------
// Applies a vertex remap, filling out a new index buffer
//-------------------------------------------------------------------------------------
// 16-bit copying variant: thin wrapper over the _FinalizeIB template.
_Use_decl_annotations_
static Bool FinalizeIB( const uint16_t* ibin, size_t nFaces, const uint32_t* vertexRemap, size_t nVerts, uint16_t* ibout )
{
    return _FinalizeIB<uint16_t>( ibin, nFaces, vertexRemap, nVerts, ibout );
}
// 16-bit in-place variant: thin wrapper over the _FinalizeIB template.
_Use_decl_annotations_
static Bool FinalizeIB( uint16_t* ib, size_t nFaces, const uint32_t* vertexRemap, size_t nVerts )
{
    return _FinalizeIB<uint16_t>( ib, nFaces, vertexRemap, nVerts );
}
//-------------------------------------------------------------------------------------
// 32-bit copying variant: thin wrapper over the _FinalizeIB template.
_Use_decl_annotations_
static Bool FinalizeIB( const uint32_t* ibin, size_t nFaces, const uint32_t* vertexRemap, size_t nVerts, uint32_t* ibout )
{
    return _FinalizeIB<uint32_t>( ibin, nFaces, vertexRemap, nVerts, ibout );
}
// 32-bit in-place variant: thin wrapper over the _FinalizeIB template.
_Use_decl_annotations_
static Bool FinalizeIB( uint32_t* ib, size_t nFaces, const uint32_t* vertexRemap, size_t nVerts )
{
    return _FinalizeIB<uint32_t>( ib, nFaces, vertexRemap, nVerts );
}
//-------------------------------------------------------------------------------------
// Applies a vertex remap and/or a vertex duplication set to a vertex buffer
//-------------------------------------------------------------------------------------
#pragma warning(push)
#pragma warning( disable : 6101 )
// vbout receives nVerts + nDupVerts vertices: the originals scattered through
// vertexRemap, then the duplicated vertices appended (and remapped the same way).
// Note: when dupVerts is set, vertexRemap is indexed up to nVerts + nDupVerts - 1,
// so the remap array must cover the expanded range.
_Use_decl_annotations_
static Bool FinalizeVB( const void* vbin, size_t stride, size_t nVerts,
const uint32_t* dupVerts, size_t nDupVerts,
const uint32_t* vertexRemap, void* vbout )
{
    if ( !vbin || !stride || !nVerts || !vbout )
        return false;

    // At least one of the two transforms must be requested.
    if ( !dupVerts && !vertexRemap )
        return false;

    // dupVerts and nDupVerts must agree (both present or both absent).
    if ( dupVerts && !nDupVerts )
        return false;

    if ( !dupVerts && nDupVerts > 0 )
        return false;

    if ( nVerts >= UINT32_MAX )
        return false;

    if ( (uint64_t(nVerts) + uint64_t(nDupVerts) ) >= UINT32_MAX )
        return false;

    // The scatter below cannot run in place.
    if ( vbin == vbout )
        return false;

    size_t newVerts = nVerts + nDupVerts;

    const uint8_t *sptr = reinterpret_cast<const uint8_t*>( vbin );
    uint8_t *dptr = reinterpret_cast<uint8_t*>( vbout );

#if DEBUG
    memset( vbout, 0, newVerts * stride );
#endif

    // Scatter the original vertices to their remapped slots.
    for( size_t j = 0; j < nVerts; ++j )
    {
        uint32_t dest = ( vertexRemap ) ? vertexRemap[ j ] : uint32_t(j);

        if ( dest == UNUSED32 )
        {
            // remap entry is unused
        }
        else if ( dest < newVerts )
        {
            memcpy( dptr + dest * stride, sptr, stride );
        }
        else
            return false;

        sptr += stride;
    }

    if ( dupVerts )
    {
        // Copy each duplicated source vertex into its (possibly remapped) slot.
        for( size_t j = 0; j < nDupVerts; ++j )
        {
            uint32_t dup = dupVerts[ j ];

            uint32_t dest = ( vertexRemap ) ? vertexRemap[ nVerts + j ] : uint32_t( nVerts + j );

            if ( dest == UNUSED32 )
            {
                // remap entry is unused
            }
            else if ( dup < nVerts && dest < newVerts )
            {
                sptr = reinterpret_cast<const uint8_t*>( vbin ) + dup * stride;
                memcpy( dptr + dest * stride, sptr, stride );
            }
            else
                return false;
        }
    }

    return true;
}
#pragma warning(pop)
  484. _Use_decl_annotations_
  485. static Bool FinalizeVB( void* vb, size_t stride, size_t nVerts, const uint32_t* vertexRemap )
  486. {
  487. if ( nVerts >= UINT32_MAX )
  488. return false;
  489. return SwapVertices( vb, stride, nVerts, nullptr, vertexRemap );
  490. }
  491. //-------------------------------------------------------------------------------------
  492. // Applies a vertex remap and/or a vertex duplication set to a vertex buffer and
  493. // point representatives
  494. //-------------------------------------------------------------------------------------
  495. #pragma warning(push)
  496. #pragma warning( disable : 6101 )
  497. _Use_decl_annotations_
  498. static Bool FinalizeVBAndPointReps( const void* vbin, size_t stride, size_t nVerts, const uint32_t* prin,
  499. const uint32_t* dupVerts, size_t nDupVerts, const uint32_t* vertexRemap,
  500. void* vbout, uint32_t* prout )
  501. {
  502. if ( !vbin || !stride || !nVerts || !prin || !vbout || !prout )
  503. return false;
  504. if ( !dupVerts && !vertexRemap )
  505. return false;
  506. if ( dupVerts && !nDupVerts )
  507. return false;
  508. if ( !dupVerts && nDupVerts > 0 )
  509. return false;
  510. if ( nVerts >= UINT32_MAX )
  511. return false;
  512. if ( (uint64_t(nVerts) + uint64_t(nDupVerts) ) >= UINT32_MAX )
  513. return false;
  514. if ( vbin == vbout )
  515. return false;
  516. size_t newVerts = nVerts + nDupVerts;
  517. const uint8_t *sptr = reinterpret_cast<const uint8_t*>( vbin );
  518. uint8_t *dptr = reinterpret_cast<uint8_t*>( vbout );
  519. #if DEBUG
  520. memset( vbout, 0, newVerts * stride );
  521. #endif
  522. std__unique_ptr<uint32_t> pointRep( nVerts + nDupVerts );
  523. memcpy( pointRep.get(), prin, sizeof(uint32_t) * nVerts );
  524. for( size_t i = 0; i < nDupVerts; ++i )
  525. {
  526. pointRep[ i + nVerts ] = prin[ dupVerts[ i ] ];
  527. }
  528. if ( vertexRemap )
  529. {
  530. // clean up point reps for any removed vertices
  531. for( uint32_t i = 0; i < newVerts; ++i )
  532. {
  533. if ( vertexRemap[ i ] != UNUSED32 )
  534. {
  535. uint32_t old = pointRep[ i ];
  536. if ( ( old != UNUSED32 ) && ( vertexRemap[old] == UNUSED32 ) )
  537. {
  538. pointRep[ i ] = i;
  539. for( size_t k = (i+1); k < newVerts; ++k )
  540. {
  541. if ( pointRep[ k ] == old )
  542. pointRep[ k ] = i;
  543. }
  544. }
  545. }
  546. }
  547. }
  548. size_t j = 0;
  549. for( ; j < nVerts; ++j )
  550. {
  551. uint32_t dest = ( vertexRemap ) ? vertexRemap[ j ] : uint32_t(j);
  552. if ( dest == UNUSED32 )
  553. {
  554. // remap entry is unused
  555. }
  556. else if ( dest < newVerts )
  557. {
  558. memcpy( dptr + dest * stride, sptr, stride );
  559. uint32_t pr = pointRep[ j ];
  560. if ( pr < newVerts )
  561. {
  562. prout[ dest ] = ( vertexRemap ) ? vertexRemap[ pr ] : pr;
  563. }
  564. }
  565. else
  566. return false;
  567. sptr += stride;
  568. }
  569. if ( dupVerts )
  570. {
  571. for( size_t k = 0; k < nDupVerts; ++k )
  572. {
  573. uint32_t dup = dupVerts[ k ];
  574. uint32_t dest = ( vertexRemap ) ? vertexRemap[ nVerts + k ] : uint32_t( nVerts + k );
  575. if ( dest == UNUSED32 )
  576. {
  577. // remap entry is unused
  578. }
  579. else if ( dup < nVerts && dest < newVerts )
  580. {
  581. sptr = reinterpret_cast<const uint8_t*>( vbin ) + dup * stride;
  582. memcpy( dptr + dest * stride, sptr, stride );
  583. uint32_t pr = pointRep[ nVerts + k ];
  584. if (pr < (nVerts + nDupVerts) )
  585. {
  586. prout[ dest ] = ( vertexRemap ) ? vertexRemap[ pr ] : pr;
  587. }
  588. }
  589. else
  590. return false;
  591. }
  592. }
  593. return true;
  594. }
  595. #pragma warning(pop)
  596. _Use_decl_annotations_
  597. static Bool FinalizeVBAndPointReps( void* vb, size_t stride, size_t nVerts, uint32_t* pointRep, const uint32_t* vertexRemap )
  598. {
  599. if ( nVerts >= UINT32_MAX )
  600. return false;
  601. if ( !pointRep || !vertexRemap )
  602. return false;
  603. // clean up point reps for any removed vertices
  604. for( uint32_t i = 0; i < nVerts; ++i )
  605. {
  606. if ( vertexRemap[ i ] != UNUSED32 )
  607. {
  608. uint32_t old = pointRep[ i ];
  609. if ( ( old != UNUSED32 ) && ( vertexRemap[old] == UNUSED32 ) )
  610. {
  611. pointRep[ i ] = i;
  612. for( size_t k = (i+1); k < nVerts; ++k )
  613. {
  614. if ( pointRep[ k ] == old )
  615. pointRep[ k ] = i;
  616. }
  617. }
  618. }
  619. }
  620. return SwapVertices( vb, stride, nVerts, pointRep, vertexRemap );
  621. }
  622. //-------------------------------------------------------------------------------------
  623. template<class index_t>
  624. class mesh_status
  625. {
  626. public:
// Zero-initializes all bookkeeping; initialize() and setSubset() must be
// called before querying any face state.
mesh_status() :
mUnprocessed{},
mFaceOffset(0),
mFaceCount(0),
mMaxSubset(0),
mTotalFaces(0)
{
}
// Builds the 'physical' adjacency table for the whole mesh and records the
// subset layout. Must be called once before setSubset(). Returns false on
// invalid arguments, allocation failure, or corrupt adjacency data.
Bool initialize( _In_reads_(nFaces*3) const index_t* indices, size_t nFaces,
_In_reads_(nFaces*3) const uint32_t* adjacency, _In_ const std__vector< std__pair<size_t,size_t> >& subsets )
{
    if ( !indices || !nFaces || !adjacency || subsets.empty() )
        return false;

    // Convert adjacency to 'physical' adjacency
    mPhysicalNeighbors.reset( nFaces );
    if ( !mPhysicalNeighbors )
        return false;

#if DEBUG
    memset( mPhysicalNeighbors.get(), 0xcd, sizeof(neighborInfo) * nFaces );
#endif

    mFaceOffset = 0;
    mFaceCount = 0;
    mMaxSubset = 0;
    mTotalFaces = nFaces;

    for( const std__pair<size_t,size_t>* it = subsets.cbegin(); it != subsets.cend(); ++it )
    {
        // Reject subsets whose range would overflow 32-bit face indices.
        if ( ( uint64_t(it->first) + uint64_t(it->second) ) >= UINT32_MAX )
            return false;

        // Track the largest subset so one scratch list can serve them all.
        if ( it->second > mMaxSubset )
        {
            mMaxSubset = it->second;
        }

        uint32_t faceOffset = uint32_t( it->first );
        uint32_t faceMax = uint32_t( it->first + it->second );

        for( uint32_t face = faceOffset; face < faceMax; ++face )
        {
            if ( face >= nFaces )
                return false;

            index_t i0 = indices[ face*3 ];
            index_t i1 = indices[ face*3 + 1 ];
            index_t i2 = indices[ face*3 + 2 ];

            if ( i0 == index_t(-1)
                 || i1 == index_t(-1)
                 || i2 == index_t(-1)
                 || i0 == i1
                 || i0 == i2
                 || i1 == i2 )
            {
                // unused and degenerate faces should not have neighbors
                for( uint32_t point = 0; point < 3; ++point )
                {
                    uint32_t k = adjacency[ face * 3 + point ];

                    if ( k != UNUSED32 )
                    {
                        if ( k >= nFaces )
                            return false;

                        // Also break the neighbor's back-links to this face.
                        if ( adjacency[ k*3 ] == face )
                            mPhysicalNeighbors[ k ].neighbors[ 0 ] = UNUSED32;

                        if ( adjacency[ k*3 + 1 ] == face )
                            mPhysicalNeighbors[ k ].neighbors[ 1 ] = UNUSED32;

                        if ( adjacency[ k*3 + 2 ] == face )
                            mPhysicalNeighbors[ k ].neighbors[ 2 ] = UNUSED32;
                    }

                    mPhysicalNeighbors[ face ].neighbors[ point ] = UNUSED32;
                }
            }
            else
            {
                for( uint32_t n = 0; n < 3; ++n )
                {
                    uint32_t neighbor = adjacency[ face * 3 + n ];

                    if ( neighbor != UNUSED32 )
                    {
                        if ( ( neighbor < faceOffset ) || ( neighbor >= faceMax )
                             || ( neighbor == adjacency[ face * 3 + ( ( n + 1 ) % 3 ) ] )
                             || ( neighbor == adjacency[ face * 3 + ( ( n + 2 ) % 3 ) ] ) )
                        {
                            // Break links for any neighbors outside of our attribute set, and remove duplicate neighbors
                            neighbor = UNUSED32;
                        }
                        else
                        {
                            // Locate the edge on the neighbor that points back at us.
                            uint32_t edgeBack = find_edge<uint32_t>( &adjacency[ neighbor * 3 ], face );
                            if ( edgeBack < 3 )
                            {
                                index_t p1 = indices[ face * 3 + n ];
                                index_t p2 = indices[ face * 3 + ( ( n + 1 ) % 3 ) ];

                                index_t pn1 = indices[ neighbor * 3 + edgeBack ];
                                index_t pn2 = indices[ neighbor * 3 + ( ( edgeBack + 1 ) % 3 ) ];

                                // if wedge not identical on shared edge, drop link
                                if ( ( p1 != pn2 ) || ( p2 != pn1 ) )
                                {
                                    neighbor = UNUSED32;
                                }
                            }
                            else
                            {
                                // Neighbor does not link back to this face at all.
                                neighbor = UNUSED32;
                            }
                        }
                    }

                    mPhysicalNeighbors[ face ].neighbors[ n ] = neighbor;
                }
            }
        }
    }

    if ( !mMaxSubset )
        return false;

    // Scratch list sized for the largest subset; reused by every setSubset() call.
    mListElements.reset( mMaxSubset );
    if ( !mListElements )
        return false;

    return true;
}
// Prepares the per-face processing list for one subset
// [faceOffset, faceOffset + faceCount). initialize() must have succeeded first;
// faces with any index == -1 are skipped as unused.
Bool setSubset( _In_reads_(nFaces*3) const index_t* indices, size_t nFaces, size_t faceOffset, size_t faceCount )
{
    if ( !faceCount || !indices || !nFaces )
        return false;

    // The scratch list was sized for the largest subset seen by initialize().
    if ( faceCount > mMaxSubset )
        return false;

    if ( !mListElements )
        return false;

    if ( ( uint64_t(faceOffset) + uint64_t(faceCount) ) >= UINT32_MAX )
        return false;

    uint32_t faceMax = uint32_t( faceOffset + faceCount );

    if ( faceMax > nFaces )
        return false;

    mFaceOffset = faceOffset;
    mFaceCount = faceCount;

    // Reset the four list heads (presumably one per unprocessed-neighbor
    // count 0..3 — confirm against the rest of the class).
    mUnprocessed[0] = UNUSED32;
    mUnprocessed[1] = UNUSED32;
    mUnprocessed[2] = UNUSED32;
    mUnprocessed[3] = UNUSED32;

    for( uint32_t face = uint32_t( faceOffset ); face < faceMax; ++face )
    {
        index_t i0 = indices[ face*3 ];
        index_t i1 = indices[ face*3 + 1 ];
        index_t i2 = indices[ face*3 + 2 ];

        if ( i0 == index_t(-1)
             || i1 == index_t(-1)
             || i2 == index_t(-1) )
        {
            // filter out unused triangles
            continue;
        }

        // Count this face's live physical neighbors.
        uint32_t unprocessed = 0;
        for( uint32_t n = 0; n < 3; ++n )
        {
            if ( mPhysicalNeighbors[ face ].neighbors[ n ] != UNUSED32 )
            {
                unprocessed += 1;

                assert( mPhysicalNeighbors[ face ].neighbors[ n ] >= mFaceOffset );
                assert( mPhysicalNeighbors[ face ].neighbors[ n ] < faceMax );
            }
        }

        // List entries are indexed relative to the subset start.
        uint32_t faceIndex = uint32_t( face - faceOffset );
        mListElements[ faceIndex ].processed = false;
        mListElements[ faceIndex ].unprocessed = unprocessed;

        push_front( faceIndex );
    }

    return true;
}
  788. bool isprocessed( uint32_t face ) const
  789. {
  790. assert( face < mTotalFaces );
  791. assert( ( face >= mFaceOffset ) || ( face < ( mFaceOffset + mFaceCount ) ) );
  792. return mListElements[ face - mFaceOffset ].processed;
  793. }
  794. uint32_t unprocessed_count( uint32_t face ) const
  795. {
  796. assert( face < mTotalFaces );
  797. assert( ( face >= mFaceOffset ) || ( face < ( mFaceOffset + mFaceCount ) ) );
  798. return mListElements[ face - mFaceOffset ].unprocessed;
  799. }
  800. uint32_t find_initial() const
  801. {
  802. for( size_t j = 0; j < 4; ++j )
  803. {
  804. if ( mUnprocessed[j] != UNUSED32 )
  805. return uint32_t( mUnprocessed[ j ] + mFaceOffset );
  806. }
  807. return UNUSED32;
  808. }
  809. void mark( uint32_t face )
  810. {
  811. assert( face < mTotalFaces );
  812. assert( ( face >= mFaceOffset ) || ( face < ( mFaceOffset + mFaceCount ) ) );
  813. uint32_t faceIndex = uint32_t( face - mFaceOffset );
  814. assert( !mListElements[ faceIndex ].processed );
  815. mListElements[ faceIndex ].processed = true;
  816. remove( faceIndex );
  817. for( uint32_t n = 0; n < 3; ++n )
  818. {
  819. uint32_t neighbor = mPhysicalNeighbors[ face ].neighbors[ n ];
  820. if ( ( neighbor != UNUSED32 ) && !isprocessed( neighbor ) )
  821. {
  822. decrement( neighbor );
  823. }
  824. }
  825. }
  826. uint32_t find_next( uint32_t face ) const
  827. {
  828. assert( face < mTotalFaces );
  829. assert( ( face >= mFaceOffset ) || ( face < ( mFaceOffset + mFaceCount ) ) );
  830. uint32_t iret = 3;
  831. uint32_t minNeighbor = UNUSED32;
  832. uint32_t minNextNeighbor = 0;
  833. for( uint32_t n = 0; n < 3; ++n )
  834. {
  835. uint32_t neighbor = mPhysicalNeighbors[ face ].neighbors[ n ];
  836. if( ( neighbor == UNUSED32 ) || isprocessed( neighbor ) )
  837. continue;
  838. uint32_t unprocessed = unprocessed_count( neighbor );
  839. assert( unprocessed < 3 );
  840. uint32_t mintemp = UNUSED32;
  841. for( uint32_t nt = 0; nt < 3; ++nt )
  842. {
  843. uint32_t neighborTemp = mPhysicalNeighbors[ neighbor ].neighbors[ nt ];
  844. if( ( neighborTemp == UNUSED32 ) || isprocessed( neighborTemp ) )
  845. continue;
  846. uint32_t next_count = unprocessed_count( neighborTemp );
  847. if ( next_count < mintemp )
  848. mintemp = next_count;
  849. }
  850. if ( mintemp == UNUSED32 )
  851. mintemp = 0;
  852. if ( unprocessed < minNeighbor )
  853. {
  854. iret = n;
  855. minNeighbor = unprocessed;
  856. minNextNeighbor = mintemp;
  857. }
  858. else if ( ( unprocessed == minNeighbor ) && ( mintemp < minNextNeighbor ) )
  859. {
  860. iret = n;
  861. minNextNeighbor = mintemp;
  862. }
  863. }
  864. return iret;
  865. }
  866. const uint32_t get_neighbors( uint32_t face, uint32_t n ) const
  867. {
  868. assert( face < mTotalFaces );
  869. assert( n < 3 );
  870. return mPhysicalNeighbors[ face ].neighbors[ n ];
  871. }
  872. const uint32_t* get_neighborsPtr( uint32_t face ) const
  873. {
  874. assert( face < mTotalFaces );
  875. return &mPhysicalNeighbors[ face ].neighbors[ 0 ];
  876. }
  877. private:
// Inserts faceIndex (subset-relative) at the head of the intrusive list for
// its current unprocessed-neighbor count.
void push_front( uint32_t faceIndex )
{
    assert( faceIndex < mFaceCount );
    uint32_t unprocessed = mListElements[ faceIndex ].unprocessed;
    uint32_t head = mUnprocessed[ unprocessed ];
    mListElements[ faceIndex ].next = head;
    // link the old head back to the new one, if the list was non-empty
    if ( head != UNUSED32 )
        mListElements[ head ].prev = faceIndex;
    mUnprocessed[ unprocessed ] = faceIndex;
    // the new head has no predecessor
    mListElements[ faceIndex ].prev = UNUSED32;
}
// Unlinks faceIndex (subset-relative) from whichever bucket list it is on,
// patching prev/next pointers and the bucket head as needed.
void remove( uint32_t faceIndex )
{
    assert( faceIndex < mFaceCount );
    if ( mListElements[ faceIndex ].prev != UNUSED32 )
    {
        // interior or tail node: it cannot also be the bucket head
        assert( mUnprocessed[ mListElements[ faceIndex ].unprocessed ] != faceIndex );
        uint32_t prev = mListElements[ faceIndex ].prev;
        uint32_t next = mListElements[ faceIndex ].next;
        mListElements[ prev ].next = next;
        if ( next != UNUSED32 )
        {
            mListElements[ next ].prev = prev;
        }
    }
    else
    {
        // remove head of the list
        assert( mUnprocessed[ mListElements[ faceIndex ].unprocessed ] == faceIndex );
        uint32_t unprocessed = mListElements[ faceIndex ].unprocessed;
        mUnprocessed[ unprocessed ] = mListElements[ faceIndex ].next;
        if ( mUnprocessed[ unprocessed ] != UNUSED32 )
        {
            mListElements[ mUnprocessed[ unprocessed ] ].prev = UNUSED32;
        }
    }
    // leave the node fully detached
    mListElements[ faceIndex ].prev =
        mListElements[ faceIndex ].next = UNUSED32;
}
  917. void decrement( uint32_t face )
  918. {
  919. assert( face < mTotalFaces );
  920. assert( ( face >= mFaceOffset ) || ( face < ( mFaceOffset + mFaceCount ) ) );
  921. assert( !isprocessed(face) );
  922. uint32_t faceIndex = uint32_t( face - mFaceOffset );
  923. assert( (mListElements[faceIndex].unprocessed >= 1) && (mListElements[faceIndex].unprocessed <= 3) );
  924. remove( faceIndex );
  925. mListElements[ faceIndex ].unprocessed -= 1;
  926. push_front( faceIndex );
  927. }
// Adjacency record for one face: absolute indices of up to three neighbor
// faces; UNUSED32 marks a missing or deliberately broken link.
struct neighborInfo
{
    uint32_t neighbors[3];
};
// Node of the intrusive doubly-linked bucket lists. Indices are
// subset-relative; UNUSED32 terminates a chain.
struct listElement
{
    bool processed;       // face already emitted by the walk
    uint32_t unprocessed; // number of neighbors not yet processed (0..3)
    uint32_t prev;        // previous node in the same bucket list
    uint32_t next;        // next node in the same bucket list
};
uint32_t mUnprocessed[4]; // bucket heads, indexed by unprocessed-neighbor count
size_t mFaceOffset;       // first face of the active subset
size_t mFaceCount;        // number of faces in the active subset
size_t mMaxSubset;        // largest subset size seen at initialize() time
size_t mTotalFaces;       // total faces in the whole mesh
// NOTE(review): std__unique_ptr appears to own an array (reset(n) allocates,
// operator[] indexes) rather than a single object — confirm against its declaration.
std__unique_ptr<listElement> mListElements;       // per-subset-face list nodes (mMaxSubset entries)
std__unique_ptr<neighborInfo> mPhysicalNeighbors; // per-face adjacency (mTotalFaces entries)
  946. };
  947. //-------------------------------------------------------------------------------------
  948. typedef std__pair<uint32_t,uint32_t> facecorner_t;
  949. template<class index_t>
  950. inline facecorner_t counterclockwise_corner( facecorner_t corner, mesh_status<index_t>& status )
  951. {
  952. assert( corner.second != UNUSED32 );
  953. uint32_t edge = ( corner.second + 2 ) % 3;
  954. uint32_t neighbor = status.get_neighbors( corner.first, edge );
  955. uint32_t point = ( neighbor == UNUSED32 ) ? UNUSED32 : find_edge( status.get_neighborsPtr( neighbor ), corner.first );
  956. return facecorner_t( neighbor, point );
  957. }
  958. //-------------------------------------------------------------------------------------
  959. class sim_vcache
  960. {
  961. public:
  962. sim_vcache() : mTail(0), mCacheSize(0) {}
  963. Bool initialize( uint32_t cacheSize )
  964. {
  965. if ( !cacheSize )
  966. return false;
  967. mFIFO.reset(cacheSize);
  968. if ( !mFIFO )
  969. return false;
  970. mCacheSize = cacheSize;
  971. clear();
  972. return true;
  973. }
  974. void clear()
  975. {
  976. assert( mFIFO != 0 );
  977. mTail = 0;
  978. memset( mFIFO.get(), 0xff, sizeof(uint32_t) * mCacheSize );
  979. }
  980. bool access( uint32_t vertex )
  981. {
  982. assert( vertex != UNUSED32 );
  983. assert( mFIFO != 0 );
  984. for( size_t ptr = 0; ptr < mCacheSize; ++ptr )
  985. {
  986. if ( mFIFO[ ptr ] == vertex )
  987. {
  988. return true;
  989. }
  990. }
  991. mFIFO[ mTail ] = vertex;
  992. mTail += 1;
  993. if ( mTail == mCacheSize )
  994. mTail = 0;
  995. return false;
  996. }
  997. private:
  998. uint32_t mTail;
  999. uint32_t mCacheSize;
  1000. std__unique_ptr<uint32_t> mFIFO;
  1001. };
  1002. //-------------------------------------------------------------------------------------
// Produces a face remap that walks the mesh in triangle-strip order. For each
// attribute subset, the walk repeatedly seeds at the face returned by
// find_initial() and follows the neighbor selected by find_next() until the
// strip dead-ends. On output faceRemap[newPosition] = originalFace.
template<class index_t>
static Bool _StripReorder( _In_reads_(nFaces*3) const index_t* indices, _In_ size_t nFaces,
    _In_reads_(nFaces*3) const uint32_t* adjacency,
    _In_reads_opt_(nFaces) const uint32_t* attributes,
    _Out_writes_(nFaces) uint32_t* faceRemap )
{
    std__vector< std__pair<size_t,size_t> > subsets; ComputeSubsets( subsets, attributes, nFaces );
    assert( !subsets.empty() );
    mesh_status<index_t> status;
    if(!status.initialize( indices, nFaces, adjacency, subsets ))return false;
    // faceRemapInverse[originalFace] = new position (UNUSED32 while unassigned)
    std__unique_ptr<uint32_t> faceRemapInverse(nFaces);
    if ( !faceRemapInverse )return false;
    memset( faceRemapInverse.get(), 0xff, sizeof(uint32_t) * nFaces );
    for( const std__pair<size_t,size_t>* it = subsets.cbegin(); it != subsets.cend(); ++it )
    {
        if(!status.setSubset( indices, nFaces, it->first, it->second ))return false;
        uint32_t curface = 0;
        for(;;)
        {
            uint32_t face = status.find_initial();
            if ( face == UNUSED32 )
                break; // subset exhausted
            status.mark( face );
            uint32_t next = status.find_next( face );
            for(;;)
            {
                assert( face != UNUSED32 );
                // assign the face its position within the subset's output range
                faceRemapInverse[ face ] = uint32_t( curface + it->first );
                curface += 1;
                // if at end of strip, break out
                if ( next >= 3 )
                    break;
                face = status.get_neighbors( face, next );
                assert( face != UNUSED32 );
                status.mark( face );
                next = status.find_next( face );
            }
        }
    }
    // inverse remap
    memset( faceRemap, 0xff, sizeof(uint32_t) * nFaces );
    for( uint32_t j = 0; j < nFaces; ++j )
    {
        uint32_t f = faceRemapInverse[ j ];
        if ( f < nFaces )
        {
            faceRemap[ f ] = j;
        }
    }
    return true;
}
  1054. //-------------------------------------------------------------------------------------
// Produces a vertex-cache-aware face reorder. For each attribute subset it
// grows strips of adjacent faces while a simulated FIFO cache of size
// 'vertexCache' counts misses; once the misses accumulated since the last
// remembered restart corner would exceed (vertexCache - restart), the walk
// jumps back to that corner. On output faceRemap[newPosition] = originalFace.
template<class index_t>
static Bool _VertexCacheStripReorder( _In_reads_(nFaces*3) const index_t* indices, _In_ size_t nFaces,
    _In_reads_(nFaces*3) const uint32_t* adjacency,
    _In_reads_opt_(nFaces) const uint32_t* attributes,
    _Out_writes_(nFaces) uint32_t* faceRemap,
    uint32_t vertexCache, uint32_t restart )
{
    std__vector< std__pair<size_t,size_t> > subsets; ComputeSubsets( subsets, attributes, nFaces );
    assert( !subsets.empty() );
    mesh_status<index_t> status;
    if(!status.initialize( indices, nFaces, adjacency, subsets ))return false;
    sim_vcache vcache;
    if(!vcache.initialize( vertexCache ))return false;
    // faceRemapInverse[originalFace] = new position (UNUSED32 while unassigned)
    std__unique_ptr<uint32_t> faceRemapInverse( nFaces );
    if ( !faceRemapInverse )return false;
    memset( faceRemapInverse.get(), 0xff, sizeof(uint32_t) * nFaces );
    assert( vertexCache >= restart );
    // miss budget before a restart is forced
    uint32_t desired = vertexCache - restart;
    for( const std__pair<size_t,size_t>* it = subsets.cbegin(); it != subsets.cend(); ++it )
    {
        if(!status.setSubset( indices, nFaces, it->first, it->second ))return false;
        vcache.clear();
        uint32_t locnext = 0;                           // misses since the last restart candidate
        facecorner_t nextCorner( UNUSED32, UNUSED32 );  // remembered restart corner
        facecorner_t curCorner( UNUSED32, UNUSED32 );
        uint32_t curface = 0;
        for(;;)
        {
            assert( nextCorner.first == UNUSED32 );
            curCorner.first = status.find_initial();
            if ( curCorner.first == UNUSED32 )
                break; // subset exhausted
            // choose a starting corner whose first edge leads to live work
            uint32_t n0 = status.get_neighbors( curCorner.first, 0 );
            if ( ( n0 != UNUSED32 ) && !status.isprocessed( n0 ) )
            {
                curCorner.second = 1;
            }
            else
            {
                uint32_t n1 = status.get_neighbors( curCorner.first, 1 );
                if ( ( n1 != UNUSED32 ) && !status.isprocessed( n1 ) )
                {
                    curCorner.second = 2;
                }
                else
                {
                    curCorner.second = 0;
                }
            }
            bool striprestart = false;
            for(;;)
            {
                assert( curCorner.first != UNUSED32 );
                assert( !status.isprocessed( curCorner.first ) );
                // Decision: either add a ring of faces or restart strip
                if ( nextCorner.first != UNUSED32 )
                {
                    // count the live faces in the upcoming fan
                    uint32_t nf = 0;
                    for( facecorner_t temp = curCorner; ; )
                    {
                        facecorner_t next = counterclockwise_corner<index_t>( temp, status );
                        if ( ( next.first == UNUSED32 ) || status.isprocessed( next.first ) )
                            break;
                        ++nf;
                        temp = next;
                    }
                    if ( locnext + nf > desired )
                    {
                        // restart
                        if ( !status.isprocessed( nextCorner.first ) )
                        {
                            curCorner = nextCorner;
                        }
                        nextCorner.first = UNUSED32;
                    }
                }
                for(;;)
                {
                    assert( curCorner.first != UNUSED32 );
                    status.mark( curCorner.first );
                    faceRemapInverse[ curCorner.first ] = uint32_t( curface + it->first );
                    curface += 1;
                    // push the face's three vertices through the simulated cache,
                    // counting misses toward the restart budget
                    assert( indices[ curCorner.first * 3 ] != index_t(-1) );
                    if ( !vcache.access( indices[ curCorner.first * 3 ] ) )
                        locnext += 1;
                    assert( indices[ curCorner.first * 3 + 1 ] != index_t(-1) );
                    if ( !vcache.access( indices[ curCorner.first * 3 + 1 ] ) )
                        locnext += 1;
                    assert( indices[ curCorner.first * 3 + 2 ] != index_t(-1) );
                    if ( !vcache.access( indices[ curCorner.first * 3 + 2 ] ) )
                        locnext += 1;
                    // interior continuation (same fan) vs. exterior continuation (next ring)
                    facecorner_t intCorner = counterclockwise_corner<index_t>( curCorner, status );
                    bool interiornei = ( intCorner.first != UNUSED32 ) && !status.isprocessed( intCorner.first );
                    facecorner_t extCorner = counterclockwise_corner<index_t>( facecorner_t( curCorner.first, ( curCorner.second + 2 ) % 3 ), status );
                    bool exteriornei = ( extCorner.first != UNUSED32 ) && !status.isprocessed( extCorner.first );
                    if ( interiornei )
                    {
                        if ( exteriornei )
                        {
                            // remember the exterior corner as a restart point
                            if ( nextCorner.first == UNUSED32 )
                            {
                                nextCorner = extCorner;
                                locnext = 0;
                            }
                        }
                        curCorner = intCorner;
                    }
                    else if ( exteriornei )
                    {
                        curCorner = extCorner;
                        break;
                    }
                    else
                    {
                        // dead end: fall back to the remembered restart corner, if any
                        curCorner = nextCorner;
                        nextCorner.first = UNUSED32;
                        if ( ( curCorner.first == UNUSED32 ) || status.isprocessed( curCorner.first ) )
                        {
                            striprestart = true;
                            break;
                        }
                    }
                }
                if ( striprestart )
                    break;
            }
        }
    }
    // inverse remap
    memset( faceRemap, 0xff, sizeof(uint32_t) * nFaces );
    for( uint32_t j = 0; j < nFaces; ++j )
    {
        uint32_t f = faceRemapInverse[ j ];
        if ( f < nFaces )
        {
            faceRemap[ f ] = j;
        }
    }
    return true;
}
  1195. //-------------------------------------------------------------------------------------
  1196. template<class index_t>
  1197. static Bool _OptimizeVertices( const index_t* indices, size_t nFaces, size_t nVerts, uint32_t* vertexRemap )
  1198. {
  1199. if ( !indices || !nFaces || !nVerts || !vertexRemap )
  1200. return false;
  1201. if ( nVerts >= index_t(-1) )
  1202. return false;
  1203. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  1204. return false;
  1205. std__unique_ptr<uint32_t> tempRemap( nVerts );
  1206. if ( !tempRemap )
  1207. return false;
  1208. memset( tempRemap.get(), 0xff, sizeof(uint32_t) * nVerts );
  1209. uint32_t curvertex = 0;
  1210. for( size_t j = 0; j < (nFaces * 3); ++j )
  1211. {
  1212. index_t curindex = indices[ j ];
  1213. if ( curindex == index_t(-1) )
  1214. continue;
  1215. if ( curindex >= nVerts )
  1216. return false;
  1217. if ( tempRemap[ curindex ] == UNUSED32 )
  1218. {
  1219. tempRemap[ curindex ] = curvertex;
  1220. ++curvertex;
  1221. }
  1222. }
  1223. // inverse lookup
  1224. memset( vertexRemap, 0xff, sizeof(uint32_t) * nVerts );
  1225. for( uint32_t j = 0; j < nVerts; ++j )
  1226. {
  1227. uint32_t vertindex = tempRemap[ j ];
  1228. if ( vertindex != UNUSED32 )
  1229. {
  1230. if ( vertindex >= nVerts )
  1231. return false;
  1232. vertexRemap[ vertindex ] = j;
  1233. }
  1234. }
  1235. return true;
  1236. }
  1237. };
  1238. namespace DirectX
  1239. {
  1240. //=====================================================================================
  1241. // Entry-points
  1242. //=====================================================================================
  1243. //-------------------------------------------------------------------------------------
  1244. /*_Use_decl_annotations_
  1245. HRESULT AttributeSort( size_t nFaces, uint32_t* attributes, uint32_t* faceRemap )
  1246. {
  1247. if ( !nFaces || !attributes || !faceRemap )
  1248. return E_INVALIDARG;
  1249. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  1250. return HRESULT_FROM_WIN32( ERROR_ARITHMETIC_OVERFLOW );
  1251. typedef std__pair<uint32_t,uint32_t> intpair_t;
  1252. std__vector<intpair_t> list;
  1253. list.reserve( nFaces );
  1254. for( uint32_t j=0; j < nFaces; ++j )
  1255. {
  1256. list.emplace_back( intpair_t( attributes[ j ], j ) );
  1257. }
  1258. std::stable_sort( list.begin(), list.end(), [](const intpair_t& a, const intpair_t& b ) -> bool
  1259. {
  1260. return (a.first < b.first);
  1261. });
  1262. auto it = list.begin();
  1263. for( uint32_t j = 0; j < nFaces; ++j, ++it )
  1264. {
  1265. attributes[ j ] = it->first;
  1266. faceRemap[ j ] = it->second;
  1267. }
  1268. return S_OK;
  1269. }*/
  1270. //-------------------------------------------------------------------------------------
  1271. _Use_decl_annotations_
  1272. static Bool OptimizeFaces( const uint16_t* indices, size_t nFaces, const uint32_t* adjacency,
  1273. uint32_t* faceRemap, uint32_t vertexCache=OPTFACES_V_DEFAULT, uint32_t restart=OPTFACES_R_DEFAULT )
  1274. {
  1275. if ( !indices || !nFaces || !adjacency || !faceRemap )
  1276. return false;
  1277. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  1278. return false;
  1279. if( vertexCache == OPTFACES_V_STRIPORDER )
  1280. {
  1281. return _StripReorder<uint16_t>( indices, nFaces, adjacency, nullptr, faceRemap );
  1282. }
  1283. else
  1284. {
  1285. if ( restart > vertexCache )
  1286. return false;
  1287. return _VertexCacheStripReorder<uint16_t>( indices, nFaces, adjacency, nullptr, faceRemap, vertexCache, restart );
  1288. }
  1289. }
  1290. _Use_decl_annotations_
  1291. static Bool OptimizeFaces( const uint32_t* indices, size_t nFaces, const uint32_t* adjacency,
  1292. uint32_t* faceRemap, uint32_t vertexCache=OPTFACES_V_DEFAULT, uint32_t restart=OPTFACES_R_DEFAULT )
  1293. {
  1294. if ( !indices || !nFaces || !adjacency || !faceRemap )
  1295. return false;
  1296. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  1297. return false;
  1298. if( vertexCache == OPTFACES_V_STRIPORDER )
  1299. {
  1300. return _StripReorder<uint32_t>( indices, nFaces, adjacency, nullptr, faceRemap );
  1301. }
  1302. else
  1303. {
  1304. if ( restart > vertexCache )
  1305. return false;
  1306. return _VertexCacheStripReorder<uint32_t>( indices, nFaces, adjacency, nullptr, faceRemap, vertexCache, restart );
  1307. }
  1308. }
  1309. //-------------------------------------------------------------------------------------
  1310. _Use_decl_annotations_
  1311. static Bool OptimizeFacesEx( const uint16_t* indices, size_t nFaces, const uint32_t* adjacency, const uint32_t* attributes,
  1312. uint32_t* faceRemap, uint32_t vertexCache=OPTFACES_V_DEFAULT, uint32_t restart=OPTFACES_R_DEFAULT )
  1313. {
  1314. if ( !indices || !nFaces || !adjacency || !attributes || !faceRemap )
  1315. return false;
  1316. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  1317. return false;
  1318. if( vertexCache == OPTFACES_V_STRIPORDER )
  1319. {
  1320. return _StripReorder<uint16_t>( indices, nFaces, adjacency, attributes, faceRemap );
  1321. }
  1322. else
  1323. {
  1324. if ( restart > vertexCache )
  1325. return false;
  1326. return _VertexCacheStripReorder<uint16_t>( indices, nFaces, adjacency, attributes, faceRemap, vertexCache, restart );
  1327. }
  1328. }
  1329. _Use_decl_annotations_
  1330. static Bool OptimizeFacesEx( const uint32_t* indices, size_t nFaces, const uint32_t* adjacency, const uint32_t* attributes,
  1331. uint32_t* faceRemap, uint32_t vertexCache=OPTFACES_V_DEFAULT, uint32_t restart=OPTFACES_R_DEFAULT )
  1332. {
  1333. if ( !indices || !nFaces || !adjacency || !attributes || !faceRemap )
  1334. return false;
  1335. if ( ( uint64_t(nFaces) * 3 ) >= UINT32_MAX )
  1336. return false;
  1337. if( vertexCache == OPTFACES_V_STRIPORDER )
  1338. {
  1339. return _StripReorder<uint32_t>( indices, nFaces, adjacency, attributes, faceRemap );
  1340. }
  1341. else
  1342. {
  1343. if ( restart > vertexCache )
  1344. return false;
  1345. return _VertexCacheStripReorder<uint32_t>( indices, nFaces, adjacency, attributes, faceRemap, vertexCache, restart );
  1346. }
  1347. }
  1348. //-------------------------------------------------------------------------------------
_Use_decl_annotations_
// 16-bit index entry point; see _OptimizeVertices for the remap contract
// (vertexRemap[newPosition] = originalVertex, unreferenced slots UNUSED32).
static Bool OptimizeVertices( const uint16_t* indices, size_t nFaces, size_t nVerts, uint32_t* vertexRemap )
{
    return _OptimizeVertices<uint16_t>( indices, nFaces, nVerts, vertexRemap );
}
_Use_decl_annotations_
// 32-bit index entry point; see _OptimizeVertices for the remap contract
// (vertexRemap[newPosition] = originalVertex, unreferenced slots UNUSED32).
static Bool OptimizeVertices( const uint32_t* indices, size_t nFaces, size_t nVerts, uint32_t* vertexRemap )
{
    return _OptimizeVertices<uint32_t>( indices, nFaces, nVerts, vertexRemap );
}
  1359. } // namespace DirectX