indexgenerator.cpp

// This file is part of meshoptimizer library; see meshoptimizer.h for version/license details
#include "meshoptimizer.h"

#include <assert.h>
#include <string.h>

// This work is based on:
// Matthias Teschner, Bruno Heidelberger, Matthias Mueller, Danat Pomeranets, Markus Gross. Optimized Spatial Hashing for Collision Detection of Deformable Objects. 2003
// John McDonald, Mark Kilgard. Crack-Free Point-Normal Triangles using Adjacent Edge Normals. 2010
// John Hable. Variable Rate Shading with Visibility Buffer Rendering. 2024
namespace meshopt
{

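// Incremental MurmurHash2 over 4-byte blocks: mixes `len` bytes of `key` into the running hash `h`.
// Note that trailing bytes (len % 4) are not mixed in; the collisions this can cause are benign
// because every lookup confirms candidate matches via the hashers' equal() below.
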
static unsigned int hashUpdate4(unsigned int h, const unsigned char* key, size_t len)
{
	// MurmurHash2
	const unsigned int m = 0x5bd1e995;
	const int r = 24;

	while (len >= 4)
	{
		unsigned int k = *reinterpret_cast<const unsigned int*>(key);

		k *= m;
		k ^= k >> r;
		k *= m;

		h *= m;
		h ^= k;

		key += 4;
		len -= 4;
	}

	return h;
}

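// Each hasher below provides the hash()/equal() pair that hashLookup() expects; hash() is allowed
// to collide, and equal() is the authoritative comparison used to resolve collisions.
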
struct VertexHasher
{
	const unsigned char* vertices;
	size_t vertex_size;
	size_t vertex_stride;

	size_t hash(unsigned int index) const
	{
		return hashUpdate4(0, vertices + index * vertex_stride, vertex_size);
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		return memcmp(vertices + lhs * vertex_stride, vertices + rhs * vertex_stride, vertex_size) == 0;
	}
};

struct VertexStreamHasher
{
	const meshopt_Stream* streams;
	size_t stream_count;

	size_t hash(unsigned int index) const
	{
		unsigned int h = 0;

		for (size_t i = 0; i < stream_count; ++i)
		{
			const meshopt_Stream& s = streams[i];
			const unsigned char* data = static_cast<const unsigned char*>(s.data);

			h = hashUpdate4(h, data + index * s.stride, s.size);
		}

		return h;
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		for (size_t i = 0; i < stream_count; ++i)
		{
			const meshopt_Stream& s = streams[i];
			const unsigned char* data = static_cast<const unsigned char*>(s.data);

			if (memcmp(data + lhs * s.stride, data + rhs * s.stride, s.size) != 0)
				return false;
		}

		return true;
	}
};

struct VertexCustomHasher
{
	const float* vertex_positions;
	size_t vertex_stride_float;

	int (*callback)(void*, unsigned int, unsigned int);
	void* context;

	size_t hash(unsigned int index) const
	{
		const unsigned int* key = reinterpret_cast<const unsigned int*>(vertex_positions + index * vertex_stride_float);

		unsigned int x = key[0], y = key[1], z = key[2];

		// replace negative zero with zero so that -0.f and 0.f, which compare equal as floats, hash identically
		x = (x == 0x80000000) ? 0 : x;
		y = (y == 0x80000000) ? 0 : y;
		z = (z == 0x80000000) ? 0 : z;

		// scramble bits to make sure that integer coordinates have entropy in lower bits
		x ^= x >> 17;
		y ^= y >> 17;
		z ^= z >> 17;

		// Optimized Spatial Hashing for Collision Detection of Deformable Objects
		return (x * 73856093) ^ (y * 19349663) ^ (z * 83492791);
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		const float* lp = vertex_positions + lhs * vertex_stride_float;
		const float* rp = vertex_positions + rhs * vertex_stride_float;

		if (lp[0] != rp[0] || lp[1] != rp[1] || lp[2] != rp[2])
			return false;

		return callback ? callback(context, lhs, rhs) : true;
	}
};

struct EdgeHasher
{
	const unsigned int* remap;

	size_t hash(unsigned long long edge) const
	{
		unsigned int e0 = unsigned(edge >> 32);
		unsigned int e1 = unsigned(edge);

		unsigned int h1 = remap[e0];
		unsigned int h2 = remap[e1];

		const unsigned int m = 0x5bd1e995;

		// MurmurHash64B finalizer
		h1 ^= h2 >> 18;
		h1 *= m;
		h2 ^= h1 >> 22;
		h2 *= m;
		h1 ^= h2 >> 17;
		h1 *= m;
		h2 ^= h1 >> 19;
		h2 *= m;

		return h2;
	}

	bool equal(unsigned long long lhs, unsigned long long rhs) const
	{
		unsigned int l0 = unsigned(lhs >> 32);
		unsigned int l1 = unsigned(lhs);

		unsigned int r0 = unsigned(rhs >> 32);
		unsigned int r1 = unsigned(rhs);

		return remap[l0] == remap[r0] && remap[l1] == remap[r1];
	}
};

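// Returns a power-of-two table size with at least 25% slack over `count`, which bounds the
// load factor by 0.8 and guarantees hashLookup() always finds an empty slot or a match.
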
static size_t hashBuckets(size_t count)
{
	size_t buckets = 1;
	while (buckets < count + count / 4)
		buckets *= 2;

	return buckets;
}

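// Open-addressing lookup: returns a pointer to the slot that either holds a key equal to `key`
// or is empty (so the caller can insert). The probe increment grows by 1 each step (triangular
// numbers), which visits every slot exactly once when the table size is a power of two.
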
template <typename T, typename Hash>
static T* hashLookup(T* table, size_t buckets, const Hash& hash, const T& key, const T& empty)
{
	assert(buckets > 0);
	assert((buckets & (buckets - 1)) == 0);

	size_t hashmod = buckets - 1;
	size_t bucket = hash.hash(key) & hashmod;

	for (size_t probe = 0; probe <= hashmod; ++probe)
	{
		T& item = table[bucket];

		if (item == empty)
			return &item;

		if (hash.equal(item, key))
			return &item;

		// hash collision, quadratic probing
		bucket = (bucket + probe + 1) & hashmod;
	}

	assert(false && "Hash table is full"); // unreachable
	return NULL;
}

static void buildPositionRemap(unsigned int* remap, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, meshopt_Allocator& allocator)
{
	VertexHasher vertex_hasher = {reinterpret_cast<const unsigned char*>(vertex_positions), 3 * sizeof(float), vertex_positions_stride};

	size_t vertex_table_size = hashBuckets(vertex_count);
	unsigned int* vertex_table = allocator.allocate<unsigned int>(vertex_table_size);
	memset(vertex_table, -1, vertex_table_size * sizeof(unsigned int));

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int index = unsigned(i);
		unsigned int* entry = hashLookup(vertex_table, vertex_table_size, vertex_hasher, index, ~0u);

		if (*entry == ~0u)
			*entry = index;

		remap[index] = *entry;
	}

	allocator.deallocate(vertex_table);
}

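// Shared implementation for the meshopt_generateVertexRemap* family: assigns new sequential
// indices to unique vertices in the order they are first referenced, records them in remap[]
// (leaving ~0u for vertices that are never referenced), and returns the unique vertex count.
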
template <typename Hash>
static size_t generateVertexRemap(unsigned int* remap, const unsigned int* indices, size_t index_count, size_t vertex_count, const Hash& hash, meshopt_Allocator& allocator)
{
	memset(remap, -1, vertex_count * sizeof(unsigned int));

	size_t table_size = hashBuckets(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int));

	unsigned int next_vertex = 0;

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices ? indices[i] : unsigned(i);
		assert(index < vertex_count);

		if (remap[index] != ~0u)
			continue;

		unsigned int* entry = hashLookup(table, table_size, hash, index, ~0u);

		if (*entry == ~0u)
		{
			*entry = index;
			remap[index] = next_vertex++;
		}
		else
		{
			assert(remap[*entry] != ~0u);
			remap[index] = remap[*entry];
		}
	}

	assert(next_vertex <= vertex_count);

	return next_vertex;
}

template <size_t BlockSize>
static void remapVertices(void* destination, const void* vertices, size_t vertex_count, size_t vertex_size, const unsigned int* remap)
{
	size_t block_size = BlockSize == 0 ? vertex_size : BlockSize;
	assert(block_size == vertex_size);

	for (size_t i = 0; i < vertex_count; ++i)
		if (remap[i] != ~0u)
		{
			assert(remap[i] < vertex_count);

			memcpy(static_cast<unsigned char*>(destination) + remap[i] * block_size, static_cast<const unsigned char*>(vertices) + i * block_size, block_size);
		}
}

template <typename Hash>
static void generateShadowBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const Hash& hash, meshopt_Allocator& allocator)
{
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	memset(remap, -1, vertex_count * sizeof(unsigned int));

	size_t table_size = hashBuckets(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int));

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];
		assert(index < vertex_count);

		if (remap[index] == ~0u)
		{
			unsigned int* entry = hashLookup(table, table_size, hash, index, ~0u);

			if (*entry == ~0u)
				*entry = index;

			remap[index] = *entry;
		}

		destination[i] = remap[index];
	}
}

} // namespace meshopt

size_t meshopt_generateVertexRemap(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size)
{
	using namespace meshopt;

	assert(indices || index_count == vertex_count);
	assert(!indices || index_count % 3 == 0);
	assert(vertex_size > 0 && vertex_size <= 256);

	meshopt_Allocator allocator;

	VertexHasher hasher = {static_cast<const unsigned char*>(vertices), vertex_size, vertex_size};

	return generateVertexRemap(destination, indices, index_count, vertex_count, hasher, allocator);
}

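// Example usage (a sketch; Vertex, vertices and indices are caller-provided, with an unindexed
// input of vertex_count vertices):
//
//   std::vector<unsigned int> remap(vertex_count);
//   size_t unique_vertices = meshopt_generateVertexRemap(&remap[0], NULL, vertex_count, vertices, vertex_count, sizeof(Vertex));
//
//   meshopt_remapIndexBuffer(indices, NULL, vertex_count, &remap[0]);
//   meshopt_remapVertexBuffer(vertices, vertices, vertex_count, sizeof(Vertex), &remap[0]);
//
// After this, indices holds vertex_count indices into the first unique_vertices entries of vertices.
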
size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count)
{
	using namespace meshopt;

	assert(indices || index_count == vertex_count);
	assert(!indices || index_count % 3 == 0);
	assert(stream_count > 0 && stream_count <= 16);

	for (size_t i = 0; i < stream_count; ++i)
	{
		assert(streams[i].size > 0 && streams[i].size <= 256);
		assert(streams[i].size <= streams[i].stride);
	}

	meshopt_Allocator allocator;

	VertexStreamHasher hasher = {streams, stream_count};

	return generateVertexRemap(destination, indices, index_count, vertex_count, hasher, allocator);
}

size_t meshopt_generateVertexRemapCustom(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, int (*callback)(void*, unsigned int, unsigned int), void* context)
{
	using namespace meshopt;

	assert(indices || index_count == vertex_count);
	assert(!indices || index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

	VertexCustomHasher hasher = {vertex_positions, vertex_positions_stride / sizeof(float), callback, context};

	return generateVertexRemap(destination, indices, index_count, vertex_count, hasher, allocator);
}

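// Note: the custom callback above is only consulted for vertex pairs whose positions already
// compare equal, so it only needs to decide whether two co-located vertices may be merged
// (returning non-zero to merge); it never sees pairs with different positions.
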
void meshopt_remapVertexBuffer(void* destination, const void* vertices, size_t vertex_count, size_t vertex_size, const unsigned int* remap)
{
	using namespace meshopt;

	assert(vertex_size > 0 && vertex_size <= 256);

	meshopt_Allocator allocator;

	// support in-place remap
	if (destination == vertices)
	{
		unsigned char* vertices_copy = allocator.allocate<unsigned char>(vertex_count * vertex_size);
		memcpy(vertices_copy, vertices, vertex_count * vertex_size);
		vertices = vertices_copy;
	}

	// specialize the loop for common vertex sizes to ensure memcpy is compiled as an inlined intrinsic
	switch (vertex_size)
	{
	case 4:
		return remapVertices<4>(destination, vertices, vertex_count, vertex_size, remap);

	case 8:
		return remapVertices<8>(destination, vertices, vertex_count, vertex_size, remap);

	case 12:
		return remapVertices<12>(destination, vertices, vertex_count, vertex_size, remap);

	case 16:
		return remapVertices<16>(destination, vertices, vertex_count, vertex_size, remap);

	default:
		return remapVertices<0>(destination, vertices, vertex_count, vertex_size, remap);
	}
}

void meshopt_remapIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const unsigned int* remap)
{
	assert(index_count % 3 == 0);

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices ? indices[i] : unsigned(i);
		assert(remap[index] != ~0u);

		destination[i] = remap[index];
	}
}

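// The shadow index buffer generators below remap each index to a canonical vertex that is
// identical in the attributes the caller selects (e.g. position only), so that passes which
// only need those attributes, such as depth/shadow rendering, get better vertex reuse while
// sharing the same vertex buffer.
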
void meshopt_generateShadowIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride)
{
	using namespace meshopt;

	assert(indices);
	assert(index_count % 3 == 0);
	assert(vertex_size > 0 && vertex_size <= 256);
	assert(vertex_size <= vertex_stride);

	meshopt_Allocator allocator;

	VertexHasher hasher = {static_cast<const unsigned char*>(vertices), vertex_size, vertex_stride};

	generateShadowBuffer(destination, indices, index_count, vertex_count, hasher, allocator);
}

void meshopt_generateShadowIndexBufferMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count)
{
	using namespace meshopt;

	assert(indices);
	assert(index_count % 3 == 0);
	assert(stream_count > 0 && stream_count <= 16);

	for (size_t i = 0; i < stream_count; ++i)
	{
		assert(streams[i].size > 0 && streams[i].size <= 256);
		assert(streams[i].size <= streams[i].stride);
	}

	meshopt_Allocator allocator;

	VertexStreamHasher hasher = {streams, stream_count};

	generateShadowBuffer(destination, indices, index_count, vertex_count, hasher, allocator);
}

void meshopt_generatePositionRemap(unsigned int* destination, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	using namespace meshopt;

	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

	VertexCustomHasher hasher = {vertex_positions, vertex_positions_stride / sizeof(float), NULL, NULL};

	size_t table_size = hashBuckets(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int));

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int* entry = hashLookup(table, table_size, hasher, unsigned(i), ~0u);

		if (*entry == ~0u)
			*entry = unsigned(i);

		destination[i] = *entry;
	}
}

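// The adjacency index buffer below stores 6 indices per input triangle, interleaving each
// triangle vertex with the vertex opposite the adjoining edge in the neighboring triangle
// (falling back to the triangle's own vertex on borders); this is the layout expected by
// triangles-with-adjacency primitives in GPU geometry stages.
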
void meshopt_generateAdjacencyIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

	static const int next[4] = {1, 2, 0, 1};

	// build position remap: for each vertex, which other (canonical) vertex does it map to?
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, vertex_positions, vertex_count, vertex_positions_stride, allocator);

	// build edge set; this stores all triangle edges but we can look these up by any other wedge
	EdgeHasher edge_hasher = {remap};

	size_t edge_table_size = hashBuckets(index_count);
	unsigned long long* edge_table = allocator.allocate<unsigned long long>(edge_table_size);
	unsigned int* edge_vertex_table = allocator.allocate<unsigned int>(edge_table_size);

	memset(edge_table, -1, edge_table_size * sizeof(unsigned long long));
	memset(edge_vertex_table, -1, edge_table_size * sizeof(unsigned int));

	for (size_t i = 0; i < index_count; i += 3)
	{
		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			unsigned int i2 = indices[i + next[e + 1]];
			assert(i0 < vertex_count && i1 < vertex_count && i2 < vertex_count);

			unsigned long long edge = ((unsigned long long)i0 << 32) | i1;
			unsigned long long* entry = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			if (*entry == ~0ull)
			{
				*entry = edge;

				// store vertex opposite to the edge
				edge_vertex_table[entry - edge_table] = i2;
			}
		}
	}

	// build resulting index buffer: 6 indices for each input triangle
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int patch[6];

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			// note: this refers to the opposite edge!
			unsigned long long edge = ((unsigned long long)i1 << 32) | i0;
			unsigned long long* oppe = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			patch[e * 2 + 0] = i0;
			patch[e * 2 + 1] = (*oppe == ~0ull) ? i0 : edge_vertex_table[oppe - edge_table];
		}

		memcpy(destination + i * 2, patch, sizeof(patch));
	}
}

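// The tessellation index buffer below stores 12 indices per input triangle, following the
// PN-AEN scheme from the McDonald/Kilgard paper cited above: 3 triangle vertices, then 3 pairs
// of indices describing the adjacent edges, then 3 dominant (position-canonical) vertices,
// enabling crack-free displacement across shared edges and corners.
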
void meshopt_generateTessellationIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

	static const int next[3] = {1, 2, 0};

	// build position remap: for each vertex, which other (canonical) vertex does it map to?
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, vertex_positions, vertex_count, vertex_positions_stride, allocator);

	// build edge set; this stores all triangle edges but we can look these up by any other wedge
	EdgeHasher edge_hasher = {remap};

	size_t edge_table_size = hashBuckets(index_count);
	unsigned long long* edge_table = allocator.allocate<unsigned long long>(edge_table_size);
	memset(edge_table, -1, edge_table_size * sizeof(unsigned long long));

	for (size_t i = 0; i < index_count; i += 3)
	{
		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			unsigned long long edge = ((unsigned long long)i0 << 32) | i1;
			unsigned long long* entry = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			if (*entry == ~0ull)
				*entry = edge;
		}
	}

	// build resulting index buffer: 12 indices for each input triangle
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int patch[12];

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			// note: this refers to the opposite edge!
			unsigned long long edge = ((unsigned long long)i1 << 32) | i0;
			unsigned long long oppe = *hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			// use the same edge if opposite edge doesn't exist (border)
			oppe = (oppe == ~0ull) ? edge : oppe;

			// triangle index (0, 1, 2)
			patch[e] = i0;

			// opposite edge (3, 4; 5, 6; 7, 8)
			patch[3 + e * 2 + 0] = unsigned(oppe);
			patch[3 + e * 2 + 1] = unsigned(oppe >> 32);

			// dominant vertex (9, 10, 11)
			patch[9 + e] = remap[i0];
		}

		memcpy(destination + i * 4, patch, sizeof(patch));
	}
}

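// Builds an index buffer where the first vertex of each output triangle is unique, so that
// per-triangle data can be fetched via the provoking vertex; reorder maps the new vertex indices
// back to source vertices, and the return value is the new vertex count, which is bounded by
// vertex_count + index_count / 3.
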
size_t meshopt_generateProvokingIndexBuffer(unsigned int* destination, unsigned int* reorder, const unsigned int* indices, size_t index_count, size_t vertex_count)
{
	assert(index_count % 3 == 0);

	meshopt_Allocator allocator;

	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	memset(remap, -1, vertex_count * sizeof(unsigned int));

	// compute vertex valence; this is used to prioritize least used corner
	// note: we use 8-bit counters for performance; for outlier vertices the valence is incorrect but that just affects the heuristic
	unsigned char* valence = allocator.allocate<unsigned char>(vertex_count);
	memset(valence, 0, vertex_count);

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];
		assert(index < vertex_count);

		valence[index]++;
	}

	unsigned int reorder_offset = 0;

	// assign provoking vertices; leave the rest for the next pass
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int a = indices[i + 0], b = indices[i + 1], c = indices[i + 2];
		assert(a < vertex_count && b < vertex_count && c < vertex_count);

		// try to rotate triangle such that provoking vertex hasn't been seen before
		// if multiple vertices are new, prioritize the one with least valence
		// this reduces the risk that a future triangle will have all three vertices seen
		unsigned int va = remap[a] == ~0u ? valence[a] : ~0u;
		unsigned int vb = remap[b] == ~0u ? valence[b] : ~0u;
		unsigned int vc = remap[c] == ~0u ? valence[c] : ~0u;

		if (vb != ~0u && vb <= va && vb <= vc)
		{
			// abc -> bca
			unsigned int t = a;
			a = b, b = c, c = t;
		}
		else if (vc != ~0u && vc <= va && vc <= vb)
		{
			// abc -> cab
			unsigned int t = c;
			c = b, b = a, a = t;
		}

		unsigned int newidx = reorder_offset;

		// now remap[a] = ~0u or all three vertices are old
		// recording remap[a] makes it possible to remap future references to the same index, conserving space
		if (remap[a] == ~0u)
			remap[a] = newidx;

		// we need to clone the provoking vertex to get a unique index
		// if all three are used the choice is arbitrary since no future triangle will be able to reuse any of these
		reorder[reorder_offset++] = a;

		// note: first vertex is final, the other two will be fixed up in next pass
		destination[i + 0] = newidx;
		destination[i + 1] = b;
		destination[i + 2] = c;

		// update vertex valences for corner heuristic
		valence[a]--;
		valence[b]--;
		valence[c]--;
	}

	// remap or clone non-provoking vertices (step alternates between 1 and 2 to skip the provoking
	// vertices that sit at every multiple of 3)
	int step = 1;

	for (size_t i = 1; i < index_count; i += step, step ^= 3)
	{
		unsigned int index = destination[i];

		if (remap[index] == ~0u)
		{
			// we haven't seen the vertex before as a provoking vertex
			// to maintain the reference to the original vertex we need to clone it
			unsigned int newidx = reorder_offset;

			remap[index] = newidx;
			reorder[reorder_offset++] = index;
		}

		destination[i] = remap[index];
	}

	assert(reorder_offset <= vertex_count + index_count / 3);

	return reorder_offset;
}