simplifier.cpp 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657
  1. // This file is part of meshoptimizer library; see meshoptimizer.h for version/license details
  2. #include "meshoptimizer.h"
  3. #include <assert.h>
  4. #include <float.h>
  5. #include <math.h>
  6. #include <string.h>
  7. #ifndef TRACE
  8. #define TRACE 0
  9. #endif
  10. #if TRACE
  11. #include <stdio.h>
  12. #endif
  13. #if TRACE
  14. #define TRACESTATS(i) stats[i]++;
  15. #else
  16. #define TRACESTATS(i) (void)0
  17. #endif
  18. // This work is based on:
  19. // Michael Garland and Paul S. Heckbert. Surface simplification using quadric error metrics. 1997
  20. // Michael Garland. Quadric-based polygonal surface simplification. 1999
  21. // Peter Lindstrom. Out-of-Core Simplification of Large Polygonal Models. 2000
  22. // Matthias Teschner, Bruno Heidelberger, Matthias Mueller, Danat Pomeranets, Markus Gross. Optimized Spatial Hashing for Collision Detection of Deformable Objects. 2003
  23. // Peter Van Sandt, Yannis Chronis, Jignesh M. Patel. Efficiently Searching In-Memory Sorted Arrays: Revenge of the Interpolation Search? 2019
  24. namespace meshopt
  25. {
// Directed half-edge adjacency: for every vertex, the list of half-edges that leave it.
// Built by prepareEdgeAdjacency (allocation) + updateEdgeAdjacency (fill).
struct EdgeAdjacency
{
	struct Edge
	{
		unsigned int next; // vertex this half-edge points to
		unsigned int prev; // third vertex of the triangle that owns this half-edge
	};

	unsigned int* counts;  // counts[v] = number of half-edges leaving v
	unsigned int* offsets; // offsets[v] = start of v's edges inside data[]
	Edge* data;            // edge records, grouped by source vertex; index_count entries total
};
// Reserves adjacency storage: one count/offset slot per vertex and one Edge record
// per input index. Contents are filled later by updateEdgeAdjacency.
static void prepareEdgeAdjacency(EdgeAdjacency& adjacency, size_t index_count, size_t vertex_count, meshopt_Allocator& allocator)
{
	adjacency.counts = allocator.allocate<unsigned int>(vertex_count);
	adjacency.offsets = allocator.allocate<unsigned int>(vertex_count);
	adjacency.data = allocator.allocate<EdgeAdjacency::Edge>(index_count);
}
  43. static void updateEdgeAdjacency(EdgeAdjacency& adjacency, const unsigned int* indices, size_t index_count, size_t vertex_count, const unsigned int* remap)
  44. {
  45. size_t face_count = index_count / 3;
  46. // fill edge counts
  47. memset(adjacency.counts, 0, vertex_count * sizeof(unsigned int));
  48. for (size_t i = 0; i < index_count; ++i)
  49. {
  50. unsigned int v = remap ? remap[indices[i]] : indices[i];
  51. assert(v < vertex_count);
  52. adjacency.counts[v]++;
  53. }
  54. // fill offset table
  55. unsigned int offset = 0;
  56. for (size_t i = 0; i < vertex_count; ++i)
  57. {
  58. adjacency.offsets[i] = offset;
  59. offset += adjacency.counts[i];
  60. }
  61. assert(offset == index_count);
  62. // fill edge data
  63. for (size_t i = 0; i < face_count; ++i)
  64. {
  65. unsigned int a = indices[i * 3 + 0], b = indices[i * 3 + 1], c = indices[i * 3 + 2];
  66. if (remap)
  67. {
  68. a = remap[a];
  69. b = remap[b];
  70. c = remap[c];
  71. }
  72. adjacency.data[adjacency.offsets[a]].next = b;
  73. adjacency.data[adjacency.offsets[a]].prev = c;
  74. adjacency.offsets[a]++;
  75. adjacency.data[adjacency.offsets[b]].next = c;
  76. adjacency.data[adjacency.offsets[b]].prev = a;
  77. adjacency.offsets[b]++;
  78. adjacency.data[adjacency.offsets[c]].next = a;
  79. adjacency.data[adjacency.offsets[c]].prev = b;
  80. adjacency.offsets[c]++;
  81. }
  82. // fix offsets that have been disturbed by the previous pass
  83. for (size_t i = 0; i < vertex_count; ++i)
  84. {
  85. assert(adjacency.offsets[i] >= adjacency.counts[i]);
  86. adjacency.offsets[i] -= adjacency.counts[i];
  87. }
  88. }
  89. struct PositionHasher
  90. {
  91. const float* vertex_positions;
  92. size_t vertex_stride_float;
  93. size_t hash(unsigned int index) const
  94. {
  95. const unsigned int* key = reinterpret_cast<const unsigned int*>(vertex_positions + index * vertex_stride_float);
  96. // Optimized Spatial Hashing for Collision Detection of Deformable Objects
  97. return (key[0] * 73856093) ^ (key[1] * 19349663) ^ (key[2] * 83492791);
  98. }
  99. bool equal(unsigned int lhs, unsigned int rhs) const
  100. {
  101. return memcmp(vertex_positions + lhs * vertex_stride_float, vertex_positions + rhs * vertex_stride_float, sizeof(float) * 3) == 0;
  102. }
  103. };
  104. static size_t hashBuckets2(size_t count)
  105. {
  106. size_t buckets = 1;
  107. while (buckets < count)
  108. buckets *= 2;
  109. return buckets;
  110. }
  111. template <typename T, typename Hash>
  112. static T* hashLookup2(T* table, size_t buckets, const Hash& hash, const T& key, const T& empty)
  113. {
  114. assert(buckets > 0);
  115. assert((buckets & (buckets - 1)) == 0);
  116. size_t hashmod = buckets - 1;
  117. size_t bucket = hash.hash(key) & hashmod;
  118. for (size_t probe = 0; probe <= hashmod; ++probe)
  119. {
  120. T& item = table[bucket];
  121. if (item == empty)
  122. return &item;
  123. if (hash.equal(item, key))
  124. return &item;
  125. // hash collision, quadratic probing
  126. bucket = (bucket + probe + 1) & hashmod;
  127. }
  128. assert(false && "Hash table is full"); // unreachable
  129. return 0;
  130. }
// Builds two vertex-level tables keyed on bitwise position equality:
// remap[i] - the canonical (first-seen) vertex with the same position as i
// wedge[i] - the next vertex in a cyclic list of all vertices sharing i's position
// For vertices with a unique position, remap[i] == wedge[i] == i.
static void buildPositionRemap(unsigned int* remap, unsigned int* wedge, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, meshopt_Allocator& allocator)
{
	PositionHasher hasher = {vertex_positions_data, vertex_positions_stride / sizeof(float)};

	size_t table_size = hashBuckets2(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int)); // ~0u marks an empty slot

	// build forward remap: for each vertex, which other (canonical) vertex does it map to?
	// we use position equivalence for this, and remap vertices to other existing vertices
	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int index = unsigned(i);

		unsigned int* entry = hashLookup2(table, table_size, hasher, index, ~0u);

		if (*entry == ~0u)
			*entry = index; // first vertex at this position becomes the canonical one

		remap[index] = *entry;
	}

	// build wedge table: for each vertex, which other vertex is the next wedge that also maps to the same vertex?
	// entries in table form a (cyclic) wedge loop per vertex; for manifold vertices, wedge[i] == remap[i] == i
	for (size_t i = 0; i < vertex_count; ++i)
		wedge[i] = unsigned(i);

	for (size_t i = 0; i < vertex_count; ++i)
		if (remap[i] != i)
		{
			unsigned int r = remap[i];

			// splice i into the cyclic wedge list rooted at the canonical vertex r
			wedge[i] = wedge[r];
			wedge[r] = unsigned(i);
		}
}
// Topological classification of a vertex, computed once by classifyVertices;
// drives which collapses are legal (see kCanCollapse/kHasOpposite below).
enum VertexKind
{
	Kind_Manifold, // not on an attribute seam, not on any boundary
	Kind_Border,   // not on an attribute seam, has exactly two open edges
	Kind_Seam,     // on an attribute seam with exactly two attribute seam edges
	Kind_Complex,  // none of the above; these vertices can move as long as all wedges move to the target vertex
	Kind_Locked,   // none of the above; these vertices can't move

	Kind_Count
};
// manifold vertices can collapse onto anything
// border/seam vertices can only be collapsed onto border/seam respectively
// complex vertices can collapse onto complex/locked
// a rule of thumb is that collapsing kind A into kind B preserves the kind B in the target vertex
// for example, while we could collapse Complex into Manifold, this would mean the target vertex isn't Manifold anymore
// rows = source kind, columns = target kind, both indexed by VertexKind order
const unsigned char kCanCollapse[Kind_Count][Kind_Count] = {
    {1, 1, 1, 1, 1},
    {0, 1, 0, 0, 0},
    {0, 0, 1, 0, 0},
    {0, 0, 0, 1, 1},
    {0, 0, 0, 0, 0},
};

// if a vertex is manifold or seam, adjoining edges are guaranteed to have an opposite edge
// note that for seam edges, the opposite edge isn't present in the attribute-based topology
// but is present if you consider a position-only mesh variant
// indexed like kCanCollapse: [kind of edge start][kind of edge end]
const unsigned char kHasOpposite[Kind_Count][Kind_Count] = {
    {1, 1, 1, 0, 1},
    {1, 0, 1, 0, 0},
    {1, 1, 1, 0, 1},
    {0, 0, 0, 0, 0},
    {1, 0, 1, 0, 0},
};
  190. static bool hasEdge(const EdgeAdjacency& adjacency, unsigned int a, unsigned int b)
  191. {
  192. unsigned int count = adjacency.counts[a];
  193. const EdgeAdjacency::Edge* edges = adjacency.data + adjacency.offsets[a];
  194. for (size_t i = 0; i < count; ++i)
  195. if (edges[i].next == b)
  196. return true;
  197. return false;
  198. }
// Assigns a VertexKind to every vertex and, for border/seam vertices, records
// the outgoing open half-edge in loop[] and the incoming one in loopback[].
// Non-canonical vertices (remap[i] != i) inherit the kind of their canonical vertex.
static void classifyVertices(unsigned char* result, unsigned int* loop, unsigned int* loopback, size_t vertex_count, const EdgeAdjacency& adjacency, const unsigned int* remap, const unsigned int* wedge)
{
	memset(loop, -1, vertex_count * sizeof(unsigned int));
	memset(loopback, -1, vertex_count * sizeof(unsigned int));

	// incoming & outgoing open edges: ~0u if no open edges, i if there are more than 1
	// note that this is the same data as required in loop[] arrays; loop[] data is only valid for border/seam
	// but here it's okay to fill the data out for other types of vertices as well
	unsigned int* openinc = loopback;
	unsigned int* openout = loop;

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int vertex = unsigned(i);

		unsigned int count = adjacency.counts[vertex];
		const EdgeAdjacency::Edge* edges = adjacency.data + adjacency.offsets[vertex];

		for (size_t j = 0; j < count; ++j)
		{
			unsigned int target = edges[j].next;

			// a half-edge with no opposite is "open" - it lies on a topological border
			if (!hasEdge(adjacency, target, vertex))
			{
				// storing the vertex itself is a sentinel for "more than one open edge"
				openinc[target] = (openinc[target] == ~0u) ? vertex : target;
				openout[vertex] = (openout[vertex] == ~0u) ? target : vertex;
			}
		}
	}

#if TRACE
	size_t stats[4] = {};
#endif

	for (size_t i = 0; i < vertex_count; ++i)
	{
		if (remap[i] == i)
		{
			if (wedge[i] == i)
			{
				// no attribute seam, need to check if it's manifold
				unsigned int openi = openinc[i], openo = openout[i];

				// note: we classify any vertices with no open edges as manifold
				// this is technically incorrect - if 4 triangles share an edge, we'll classify vertices as manifold
				// it's unclear if this is a problem in practice
				if (openi == ~0u && openo == ~0u)
				{
					result[i] = Kind_Manifold;
				}
				else if (openi != i && openo != i)
				{
					// exactly one incoming and one outgoing open edge
					result[i] = Kind_Border;
				}
				else
				{
					result[i] = Kind_Locked;
					TRACESTATS(0);
				}
			}
			else if (wedge[wedge[i]] == i)
			{
				// attribute seam; need to distinguish between Seam and Locked
				unsigned int w = wedge[i];
				unsigned int openiv = openinc[i], openov = openout[i];
				unsigned int openiw = openinc[w], openow = openout[w];

				// seam should have one open half-edge for each vertex, and the edges need to "connect" - point to the same vertex post-remap
				if (openiv != ~0u && openiv != i && openov != ~0u && openov != i &&
				    openiw != ~0u && openiw != w && openow != ~0u && openow != w)
				{
					if (remap[openiv] == remap[openow] && remap[openov] == remap[openiw])
					{
						result[i] = Kind_Seam;
					}
					else
					{
						result[i] = Kind_Locked;
						TRACESTATS(1);
					}
				}
				else
				{
					result[i] = Kind_Locked;
					TRACESTATS(2);
				}
			}
			else
			{
				// more than one vertex maps to this one; we don't have classification available
				result[i] = Kind_Locked;
				TRACESTATS(3);
			}
		}
		else
		{
			// canonical vertices come first in index order, so their kind is already known
			assert(remap[i] < i);

			result[i] = result[remap[i]];
		}
	}

#if TRACE
	printf("locked: many open edges %d, disconnected seam %d, many seam edges %d, many wedges %d\n",
	    int(stats[0]), int(stats[1]), int(stats[2]), int(stats[3]));
#endif
}
// Simple 3-component position; used after rescaling into the unit cube.
struct Vector3
{
	float x, y, z;
};
  299. static float rescalePositions(Vector3* result, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride)
  300. {
  301. size_t vertex_stride_float = vertex_positions_stride / sizeof(float);
  302. float minv[3] = {FLT_MAX, FLT_MAX, FLT_MAX};
  303. float maxv[3] = {-FLT_MAX, -FLT_MAX, -FLT_MAX};
  304. for (size_t i = 0; i < vertex_count; ++i)
  305. {
  306. const float* v = vertex_positions_data + i * vertex_stride_float;
  307. if (result)
  308. {
  309. result[i].x = v[0];
  310. result[i].y = v[1];
  311. result[i].z = v[2];
  312. }
  313. for (int j = 0; j < 3; ++j)
  314. {
  315. float vj = v[j];
  316. minv[j] = minv[j] > vj ? vj : minv[j];
  317. maxv[j] = maxv[j] < vj ? vj : maxv[j];
  318. }
  319. }
  320. float extent = 0.f;
  321. extent = (maxv[0] - minv[0]) < extent ? extent : (maxv[0] - minv[0]);
  322. extent = (maxv[1] - minv[1]) < extent ? extent : (maxv[1] - minv[1]);
  323. extent = (maxv[2] - minv[2]) < extent ? extent : (maxv[2] - minv[2]);
  324. if (result)
  325. {
  326. float scale = extent == 0 ? 0.f : 1.f / extent;
  327. for (size_t i = 0; i < vertex_count; ++i)
  328. {
  329. result[i].x = (result[i].x - minv[0]) * scale;
  330. result[i].y = (result[i].y - minv[1]) * scale;
  331. result[i].z = (result[i].z - minv[2]) * scale;
  332. }
  333. }
  334. return extent;
  335. }
// Weighted error quadric storing the unique coefficients of the symmetric form
// error(v) = v'Av + 2b'v + c (see quadricError for the evaluation order);
// a00/a11/a22 are the diagonal of A, a10/a20/a21 the off-diagonal elements,
// b0/b1/b2 the linear term, and w the accumulated weight used for normalization.
struct Quadric
{
	float a00, a11, a22;
	float a10, a20, a21;
	float b0, b1, b2, c;
	float w;
};
// Candidate edge collapse v0 -> v1. The union holds different data over the
// candidate's lifetime: bidi is set by pickEdgeCollapses, then rankEdgeCollapses
// overwrites it with error; errorui is the bit view of error used by the radix sort.
struct Collapse
{
	unsigned int v0;
	unsigned int v1;

	union
	{
		unsigned int bidi;
		float error;
		unsigned int errorui;
	};
};
  354. static float normalize(Vector3& v)
  355. {
  356. float length = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
  357. if (length > 0)
  358. {
  359. v.x /= length;
  360. v.y /= length;
  361. v.z /= length;
  362. }
  363. return length;
  364. }
  365. static void quadricAdd(Quadric& Q, const Quadric& R)
  366. {
  367. Q.a00 += R.a00;
  368. Q.a11 += R.a11;
  369. Q.a22 += R.a22;
  370. Q.a10 += R.a10;
  371. Q.a20 += R.a20;
  372. Q.a21 += R.a21;
  373. Q.b0 += R.b0;
  374. Q.b1 += R.b1;
  375. Q.b2 += R.b2;
  376. Q.c += R.c;
  377. Q.w += R.w;
  378. }
  379. static float quadricError(const Quadric& Q, const Vector3& v)
  380. {
  381. float rx = Q.b0;
  382. float ry = Q.b1;
  383. float rz = Q.b2;
  384. rx += Q.a10 * v.y;
  385. ry += Q.a21 * v.z;
  386. rz += Q.a20 * v.x;
  387. rx *= 2;
  388. ry *= 2;
  389. rz *= 2;
  390. rx += Q.a00 * v.x;
  391. ry += Q.a11 * v.y;
  392. rz += Q.a22 * v.z;
  393. float r = Q.c;
  394. r += rx * v.x;
  395. r += ry * v.y;
  396. r += rz * v.z;
  397. float s = Q.w == 0.f ? 0.f : 1.f / Q.w;
  398. return fabsf(r) * s;
  399. }
// Initializes Q to the quadric of the plane ax + by + cz + d = 0 with weight w,
// i.e. the outer product of (a, b, c, d) with itself, scaled by w; the resulting
// error measures the weighted squared distance to the plane.
static void quadricFromPlane(Quadric& Q, float a, float b, float c, float d, float w)
{
	float aw = a * w;
	float bw = b * w;
	float cw = c * w;
	float dw = d * w;

	// diagonal of A
	Q.a00 = a * aw;
	Q.a11 = b * bw;
	Q.a22 = c * cw;
	// off-diagonal elements of A
	Q.a10 = a * bw;
	Q.a20 = a * cw;
	Q.a21 = b * cw;
	// linear term and constant
	Q.b0 = a * dw;
	Q.b1 = b * dw;
	Q.b2 = c * dw;
	Q.c = d * dw;
	Q.w = w;
}
// Initializes Q to a point quadric: error(v) = w * ((vx - x)^2 + (vy - y)^2 + (vz - z)^2).
static void quadricFromPoint(Quadric& Q, float x, float y, float z, float w)
{
	// we need to encode (x - X) ^ 2 + (y - Y)^2 + (z - Z)^2 into the quadric
	// expanding gives identity diagonal, zero mixed terms, b = -2*(x,y,z), c = |p|^2
	Q.a00 = w;
	Q.a11 = w;
	Q.a22 = w;
	Q.a10 = 0.f;
	Q.a20 = 0.f;
	Q.a21 = 0.f;
	Q.b0 = -2.f * x * w;
	Q.b1 = -2.f * y * w;
	Q.b2 = -2.f * z * w;
	Q.c = (x * x + y * y + z * z) * w;
	Q.w = w;
}
// Builds the plane quadric of triangle (p0, p1, p2), weighted by the triangle's
// size so larger triangles contribute more error.
static void quadricFromTriangle(Quadric& Q, const Vector3& p0, const Vector3& p1, const Vector3& p2, float weight)
{
	Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};
	Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};

	// normal = cross(p1 - p0, p2 - p0)
	Vector3 normal = {p10.y * p20.z - p10.z * p20.y, p10.z * p20.x - p10.x * p20.z, p10.x * p20.y - p10.y * p20.x};
	float area = normalize(normal); // cross product length, proportional to triangle area

	// plane passes through p0 with the triangle normal
	float distance = normal.x * p0.x + normal.y * p0.y + normal.z * p0.z;

	// we use sqrtf(area) so that the error is scaled linearly; this tends to improve silhouettes
	quadricFromPlane(Q, normal.x, normal.y, normal.z, -distance, sqrtf(area) * weight);
}
// Builds a quadric penalizing movement away from edge p0-p1, using a plane that
// contains the edge and is perpendicular to the triangle (p0, p1, p2).
static void quadricFromTriangleEdge(Quadric& Q, const Vector3& p0, const Vector3& p1, const Vector3& p2, float weight)
{
	Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};
	float length = normalize(p10);

	// p20p = length of projection of p2-p0 onto normalize(p1 - p0)
	Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};
	float p20p = p20.x * p10.x + p20.y * p10.y + p20.z * p10.z;

	// normal = altitude of triangle from point p2 onto edge p1-p0
	Vector3 normal = {p20.x - p10.x * p20p, p20.y - p10.y * p20p, p20.z - p10.z * p20p};
	normalize(normal);

	float distance = normal.x * p0.x + normal.y * p0.y + normal.z * p0.z;

	// note: the weight is scaled linearly with edge length; this has to match the triangle weight
	quadricFromPlane(Q, normal.x, normal.y, normal.z, -distance, length * weight);
}
  458. static void fillFaceQuadrics(Quadric* vertex_quadrics, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* remap)
  459. {
  460. for (size_t i = 0; i < index_count; i += 3)
  461. {
  462. unsigned int i0 = indices[i + 0];
  463. unsigned int i1 = indices[i + 1];
  464. unsigned int i2 = indices[i + 2];
  465. Quadric Q;
  466. quadricFromTriangle(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], 1.f);
  467. quadricAdd(vertex_quadrics[remap[i0]], Q);
  468. quadricAdd(vertex_quadrics[remap[i1]], Q);
  469. quadricAdd(vertex_quadrics[remap[i2]], Q);
  470. }
  471. }
// Accumulates edge quadrics along open borders and attribute seams so that
// collapses are penalized for moving vertices off these feature edges.
static void fillEdgeQuadrics(Quadric* vertex_quadrics, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* remap, const unsigned char* vertex_kind, const unsigned int* loop, const unsigned int* loopback)
{
	for (size_t i = 0; i < index_count; i += 3)
	{
		static const int next[3] = {1, 2, 0};

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];

			unsigned char k0 = vertex_kind[i0];
			unsigned char k1 = vertex_kind[i1];

			// check that either i0 or i1 are border/seam and are on the same edge loop
			// note that we need to add the error even for edges that connect e.g. border & locked
			// if we don't do that, the adjacent border->border edge won't have correct errors for corners
			if (k0 != Kind_Border && k0 != Kind_Seam && k1 != Kind_Border && k1 != Kind_Seam)
				continue;

			if ((k0 == Kind_Border || k0 == Kind_Seam) && loop[i0] != i1)
				continue;

			if ((k1 == Kind_Border || k1 == Kind_Seam) && loopback[i1] != i0)
				continue;

			// seam edges should occur twice (i0->i1 and i1->i0) - skip redundant edges
			if (kHasOpposite[k0][k1] && remap[i1] > remap[i0])
				continue;

			unsigned int i2 = indices[i + next[next[e]]];

			// we try hard to maintain border edge geometry; seam edges can move more freely
			// due to topological restrictions on collapses, seam quadrics slightly improves collapse structure but aren't critical
			const float kEdgeWeightSeam = 1.f;
			const float kEdgeWeightBorder = 10.f;

			float edgeWeight = (k0 == Kind_Border || k1 == Kind_Border) ? kEdgeWeightBorder : kEdgeWeightSeam;

			Quadric Q;
			quadricFromTriangleEdge(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], edgeWeight);

			quadricAdd(vertex_quadrics[remap[i0]], Q);
			quadricAdd(vertex_quadrics[remap[i1]], Q);
		}
	}
}
  508. // does triangle ABC flip when C is replaced with D?
  509. static bool hasTriangleFlip(const Vector3& a, const Vector3& b, const Vector3& c, const Vector3& d)
  510. {
  511. Vector3 eb = {b.x - a.x, b.y - a.y, b.z - a.z};
  512. Vector3 ec = {c.x - a.x, c.y - a.y, c.z - a.z};
  513. Vector3 ed = {d.x - a.x, d.y - a.y, d.z - a.z};
  514. Vector3 nbc = {eb.y * ec.z - eb.z * ec.y, eb.z * ec.x - eb.x * ec.z, eb.x * ec.y - eb.y * ec.x};
  515. Vector3 nbd = {eb.y * ed.z - eb.z * ed.y, eb.z * ed.x - eb.x * ed.z, eb.x * ed.y - eb.y * ed.x};
  516. return nbc.x * nbd.x + nbc.y * nbd.y + nbc.z * nbd.z < 0;
  517. }
  518. static bool hasTriangleFlips(const EdgeAdjacency& adjacency, const Vector3* vertex_positions, const unsigned int* collapse_remap, unsigned int i0, unsigned int i1)
  519. {
  520. assert(collapse_remap[i0] == i0);
  521. assert(collapse_remap[i1] == i1);
  522. const Vector3& v0 = vertex_positions[i0];
  523. const Vector3& v1 = vertex_positions[i1];
  524. const EdgeAdjacency::Edge* edges = &adjacency.data[adjacency.offsets[i0]];
  525. size_t count = adjacency.counts[i0];
  526. for (size_t i = 0; i < count; ++i)
  527. {
  528. unsigned int a = collapse_remap[edges[i].next];
  529. unsigned int b = collapse_remap[edges[i].prev];
  530. // skip triangles that get collapsed
  531. // note: this is mathematically redundant as if either of these is true, the dot product in hasTriangleFlip should be 0
  532. if (a == i1 || b == i1)
  533. continue;
  534. // early-out when at least one triangle flips due to a collapse
  535. if (hasTriangleFlip(vertex_positions[a], vertex_positions[b], v0, v1))
  536. return true;
  537. }
  538. return false;
  539. }
// Collects candidate edge collapses from the index buffer into collapses[] and
// returns their count. Each candidate stores the collapse direction (v0 -> v1)
// and a bidi flag if the edge is collapsible both ways (resolved during ranking).
static size_t pickEdgeCollapses(Collapse* collapses, const unsigned int* indices, size_t index_count, const unsigned int* remap, const unsigned char* vertex_kind, const unsigned int* loop)
{
	size_t collapse_count = 0;

	for (size_t i = 0; i < index_count; i += 3)
	{
		static const int next[3] = {1, 2, 0};

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];

			// this can happen either when input has a zero-length edge, or when we perform collapses for complex
			// topology w/seams and collapse a manifold vertex that connects to both wedges onto one of them
			// we leave edges like this alone since they may be important for preserving mesh integrity
			if (remap[i0] == remap[i1])
				continue;

			unsigned char k0 = vertex_kind[i0];
			unsigned char k1 = vertex_kind[i1];

			// the edge has to be collapsible in at least one direction
			if (!(kCanCollapse[k0][k1] | kCanCollapse[k1][k0]))
				continue;

			// manifold and seam edges should occur twice (i0->i1 and i1->i0) - skip redundant edges
			if (kHasOpposite[k0][k1] && remap[i1] > remap[i0])
				continue;

			// two vertices are on a border or a seam, but there's no direct edge between them
			// this indicates that they belong to two different edge loops and we should not collapse this edge
			// loop[] tracks half edges so we only need to check i0->i1
			if (k0 == k1 && (k0 == Kind_Border || k0 == Kind_Seam) && loop[i0] != i1)
				continue;

			// edge can be collapsed in either direction - we will pick the one with minimum error
			// note: we evaluate error later during collapse ranking, here we just tag the edge as bidirectional
			if (kCanCollapse[k0][k1] & kCanCollapse[k1][k0])
			{
				Collapse c = {i0, i1, {/* bidi= */ 1}};

				collapses[collapse_count++] = c;
			}
			else
			{
				// edge can only be collapsed in one direction
				unsigned int e0 = kCanCollapse[k0][k1] ? i0 : i1;
				unsigned int e1 = kCanCollapse[k0][k1] ? i1 : i0;

				Collapse c = {e0, e1, {/* bidi= */ 0}};

				collapses[collapse_count++] = c;
			}
		}
	}

	return collapse_count;
}
// Evaluates the quadric error of each candidate and resolves bidirectional edges
// to the direction with the smaller error.
// Note: c.bidi must be read before c.error is written - they share a union.
static void rankEdgeCollapses(Collapse* collapses, size_t collapse_count, const Vector3* vertex_positions, const Quadric* vertex_quadrics, const unsigned int* remap)
{
	for (size_t i = 0; i < collapse_count; ++i)
	{
		Collapse& c = collapses[i];

		unsigned int i0 = c.v0;
		unsigned int i1 = c.v1;

		// most edges are bidirectional which means we need to evaluate errors for two collapses
		// to keep this code branchless we just use the same edge for unidirectional edges
		unsigned int j0 = c.bidi ? i1 : i0;
		unsigned int j1 = c.bidi ? i0 : i1;

		const Quadric& qi = vertex_quadrics[remap[i0]];
		const Quadric& qj = vertex_quadrics[remap[j0]];

		// error of moving the collapsed vertex to the target position
		float ei = quadricError(qi, vertex_positions[i1]);
		float ej = quadricError(qj, vertex_positions[j1]);

		// pick edge direction with minimal error
		c.v0 = ei <= ej ? i0 : j0;
		c.v1 = ei <= ej ? i1 : j1;
		c.error = ei <= ej ? ei : ej;
	}
}
#if TRACE > 1
// Debug-only: prints a histogram of collapse candidates grouped by
// (source kind, target kind), along with the minimum error per pair.
static void dumpEdgeCollapses(const Collapse* collapses, size_t collapse_count, const unsigned char* vertex_kind)
{
	size_t ckinds[Kind_Count][Kind_Count] = {};
	float cerrors[Kind_Count][Kind_Count] = {};

	for (int k0 = 0; k0 < Kind_Count; ++k0)
		for (int k1 = 0; k1 < Kind_Count; ++k1)
			cerrors[k0][k1] = FLT_MAX;

	for (size_t i = 0; i < collapse_count; ++i)
	{
		unsigned int i0 = collapses[i].v0;
		unsigned int i1 = collapses[i].v1;

		unsigned char k0 = vertex_kind[i0];
		unsigned char k1 = vertex_kind[i1];

		ckinds[k0][k1]++;
		cerrors[k0][k1] = (collapses[i].error < cerrors[k0][k1]) ? collapses[i].error : cerrors[k0][k1];
	}

	for (int k0 = 0; k0 < Kind_Count; ++k0)
		for (int k1 = 0; k1 < Kind_Count; ++k1)
			if (ckinds[k0][k1])
				printf("collapses %d -> %d: %d, min error %e\n", k0, k1, int(ckinds[k0][k1]), ckinds[k0][k1] ? sqrtf(cerrors[k0][k1]) : 0.f);
}

// Debug-only: counts edges that can't be collapsed in either direction,
// grouped by the (kind, kind) pair that blocks them.
static void dumpLockedCollapses(const unsigned int* indices, size_t index_count, const unsigned char* vertex_kind)
{
	size_t locked_collapses[Kind_Count][Kind_Count] = {};

	for (size_t i = 0; i < index_count; i += 3)
	{
		static const int next[3] = {1, 2, 0};

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];

			unsigned char k0 = vertex_kind[i0];
			unsigned char k1 = vertex_kind[i1];

			locked_collapses[k0][k1] += !kCanCollapse[k0][k1] && !kCanCollapse[k1][k0];
		}
	}

	for (int k0 = 0; k0 < Kind_Count; ++k0)
		for (int k1 = 0; k1 < Kind_Count; ++k1)
			if (locked_collapses[k0][k1])
				printf("locked collapses %d -> %d: %d\n", k0, k1, int(locked_collapses[k0][k1]));
}
#endif
  651. static void sortEdgeCollapses(unsigned int* sort_order, const Collapse* collapses, size_t collapse_count)
  652. {
  653. const int sort_bits = 11;
  654. // fill histogram for counting sort
  655. unsigned int histogram[1 << sort_bits];
  656. memset(histogram, 0, sizeof(histogram));
  657. for (size_t i = 0; i < collapse_count; ++i)
  658. {
  659. // skip sign bit since error is non-negative
  660. unsigned int key = (collapses[i].errorui << 1) >> (32 - sort_bits);
  661. histogram[key]++;
  662. }
  663. // compute offsets based on histogram data
  664. size_t histogram_sum = 0;
  665. for (size_t i = 0; i < 1 << sort_bits; ++i)
  666. {
  667. size_t count = histogram[i];
  668. histogram[i] = unsigned(histogram_sum);
  669. histogram_sum += count;
  670. }
  671. assert(histogram_sum == collapse_count);
  672. // compute sort order based on offsets
  673. for (size_t i = 0; i < collapse_count; ++i)
  674. {
  675. // skip sign bit since error is non-negative
  676. unsigned int key = (collapses[i].errorui << 1) >> (32 - sort_bits);
  677. sort_order[histogram[key]++] = unsigned(i);
  678. }
  679. }
// Executes a single collapse pass: walks collapses in ascending error order and applies
// the ones that are still legal, folding quadrics and recording vertex moves in collapse_remap.
// Returns the number of edges collapsed; result_error is raised to the largest accepted error.
static size_t performEdgeCollapses(unsigned int* collapse_remap, unsigned char* collapse_locked, Quadric* vertex_quadrics, const Collapse* collapses, size_t collapse_count, const unsigned int* collapse_order, const unsigned int* remap, const unsigned int* wedge, const unsigned char* vertex_kind, const Vector3* vertex_positions, const EdgeAdjacency& adjacency, size_t triangle_collapse_goal, float error_limit, float& result_error)
{
	size_t edge_collapses = 0;
	size_t triangle_collapses = 0;

	// most collapses remove 2 triangles; use this to establish a bound on the pass in terms of error limit
	// note that edge_collapse_goal is an estimate; triangle_collapse_goal will be used to actually limit collapses
	size_t edge_collapse_goal = triangle_collapse_goal / 2;

#if TRACE
	size_t stats[4] = {};
#endif

	for (size_t i = 0; i < collapse_count; ++i)
	{
		// collapse_order yields candidates in increasing error order
		const Collapse& c = collapses[collapse_order[i]];

		TRACESTATS(0);

		if (c.error > error_limit)
			break;

		if (triangle_collapses >= triangle_collapse_goal)
			break;

		// we limit the error in each pass based on the error of optimal last collapse; since many collapses will be locked
		// as they will share vertices with other successful collapses, we need to increase the acceptable error by some factor
		float error_goal = edge_collapse_goal < collapse_count ? 1.5f * collapses[collapse_order[edge_collapse_goal]].error : FLT_MAX;

		// on average, each collapse is expected to lock 6 other collapses; to avoid degenerate passes on meshes with odd
		// topology, we only abort if we got over 1/6 collapses accordingly.
		if (c.error > error_goal && triangle_collapses > triangle_collapse_goal / 6)
			break;

		unsigned int i0 = c.v0;
		unsigned int i1 = c.v1;

		// remap resolves wedge copies to the canonical vertex of each position
		unsigned int r0 = remap[i0];
		unsigned int r1 = remap[i1];

		// we don't collapse vertices that had source or target vertex involved in a collapse
		// it's important to not move the vertices twice since it complicates the tracking/remapping logic
		// it's important to not move other vertices towards a moved vertex to preserve error since we don't re-rank collapses mid-pass
		if (collapse_locked[r0] | collapse_locked[r1])
		{
			TRACESTATS(1);
			continue;
		}

		if (hasTriangleFlips(adjacency, vertex_positions, collapse_remap, r0, r1))
		{
			// adjust collapse goal since this collapse is invalid and shouldn't factor into error goal
			edge_collapse_goal++;

			TRACESTATS(2);
			continue;
		}

		assert(collapse_remap[r0] == r0);
		assert(collapse_remap[r1] == r1);

		// fold the quadric of the removed position into the surviving one
		quadricAdd(vertex_quadrics[r1], vertex_quadrics[r0]);

		if (vertex_kind[i0] == Kind_Complex)
		{
			// move every wedge of the complex vertex to the collapse target
			unsigned int v = i0;

			do
			{
				collapse_remap[v] = r1;
				v = wedge[v];
			} while (v != i0);
		}
		else if (vertex_kind[i0] == Kind_Seam)
		{
			// remap v0 to v1 and seam pair of v0 to seam pair of v1
			unsigned int s0 = wedge[i0];
			unsigned int s1 = wedge[i1];

			// seam vertices come in wedge pairs of exactly two
			assert(s0 != i0 && s1 != i1);
			assert(wedge[s0] == i0 && wedge[s1] == i1);

			collapse_remap[i0] = i1;
			collapse_remap[s0] = s1;
		}
		else
		{
			// manifold/border vertices have a single wedge
			assert(wedge[i0] == i0);

			collapse_remap[i0] = i1;
		}

		// lock both positions so no other collapse in this pass can touch them
		collapse_locked[r0] = 1;
		collapse_locked[r1] = 1;

		// border edges collapse 1 triangle, other edges collapse 2 or more
		triangle_collapses += (vertex_kind[i0] == Kind_Border) ? 1 : 2;
		edge_collapses++;

		result_error = result_error < c.error ? c.error : result_error;
	}

#if TRACE
	float error_goal_perfect = edge_collapse_goal < collapse_count ? collapses[collapse_order[edge_collapse_goal]].error : 0.f;

	printf("removed %d triangles, error %e (goal %e); evaluated %d/%d collapses (done %d, skipped %d, invalid %d)\n",
	    int(triangle_collapses), sqrtf(result_error), sqrtf(error_goal_perfect),
	    int(stats[0]), int(collapse_count), int(edge_collapses), int(stats[1]), int(stats[2]));
#endif

	return edge_collapses;
}
  766. static size_t remapIndexBuffer(unsigned int* indices, size_t index_count, const unsigned int* collapse_remap)
  767. {
  768. size_t write = 0;
  769. for (size_t i = 0; i < index_count; i += 3)
  770. {
  771. unsigned int v0 = collapse_remap[indices[i + 0]];
  772. unsigned int v1 = collapse_remap[indices[i + 1]];
  773. unsigned int v2 = collapse_remap[indices[i + 2]];
  774. // we never move the vertex twice during a single pass
  775. assert(collapse_remap[v0] == v0);
  776. assert(collapse_remap[v1] == v1);
  777. assert(collapse_remap[v2] == v2);
  778. if (v0 != v1 && v0 != v2 && v1 != v2)
  779. {
  780. indices[write + 0] = v0;
  781. indices[write + 1] = v1;
  782. indices[write + 2] = v2;
  783. write += 3;
  784. }
  785. }
  786. return write;
  787. }
  788. static void remapEdgeLoops(unsigned int* loop, size_t vertex_count, const unsigned int* collapse_remap)
  789. {
  790. for (size_t i = 0; i < vertex_count; ++i)
  791. {
  792. if (loop[i] != ~0u)
  793. {
  794. unsigned int l = loop[i];
  795. unsigned int r = collapse_remap[l];
  796. // i == r is a special case when the seam edge is collapsed in a direction opposite to where loop goes
  797. loop[i] = (i == r) ? loop[l] : r;
  798. }
  799. }
  800. }
  801. struct CellHasher
  802. {
  803. const unsigned int* vertex_ids;
  804. size_t hash(unsigned int i) const
  805. {
  806. unsigned int h = vertex_ids[i];
  807. // MurmurHash2 finalizer
  808. h ^= h >> 13;
  809. h *= 0x5bd1e995;
  810. h ^= h >> 15;
  811. return h;
  812. }
  813. bool equal(unsigned int lhs, unsigned int rhs) const
  814. {
  815. return vertex_ids[lhs] == vertex_ids[rhs];
  816. }
  817. };
  818. struct IdHasher
  819. {
  820. size_t hash(unsigned int id) const
  821. {
  822. unsigned int h = id;
  823. // MurmurHash2 finalizer
  824. h ^= h >> 13;
  825. h *= 0x5bd1e995;
  826. h ^= h >> 15;
  827. return h;
  828. }
  829. bool equal(unsigned int lhs, unsigned int rhs) const
  830. {
  831. return lhs == rhs;
  832. }
  833. };
  834. struct TriangleHasher
  835. {
  836. unsigned int* indices;
  837. size_t hash(unsigned int i) const
  838. {
  839. const unsigned int* tri = indices + i * 3;
  840. // Optimized Spatial Hashing for Collision Detection of Deformable Objects
  841. return (tri[0] * 73856093) ^ (tri[1] * 19349663) ^ (tri[2] * 83492791);
  842. }
  843. bool equal(unsigned int lhs, unsigned int rhs) const
  844. {
  845. const unsigned int* lt = indices + lhs * 3;
  846. const unsigned int* rt = indices + rhs * 3;
  847. return lt[0] == rt[0] && lt[1] == rt[1] && lt[2] == rt[2];
  848. }
  849. };
  850. static void computeVertexIds(unsigned int* vertex_ids, const Vector3* vertex_positions, size_t vertex_count, int grid_size)
  851. {
  852. assert(grid_size >= 1 && grid_size <= 1024);
  853. float cell_scale = float(grid_size - 1);
  854. for (size_t i = 0; i < vertex_count; ++i)
  855. {
  856. const Vector3& v = vertex_positions[i];
  857. int xi = int(v.x * cell_scale + 0.5f);
  858. int yi = int(v.y * cell_scale + 0.5f);
  859. int zi = int(v.z * cell_scale + 0.5f);
  860. vertex_ids[i] = (xi << 20) | (yi << 10) | zi;
  861. }
  862. }
  863. static size_t countTriangles(const unsigned int* vertex_ids, const unsigned int* indices, size_t index_count)
  864. {
  865. size_t result = 0;
  866. for (size_t i = 0; i < index_count; i += 3)
  867. {
  868. unsigned int id0 = vertex_ids[indices[i + 0]];
  869. unsigned int id1 = vertex_ids[indices[i + 1]];
  870. unsigned int id2 = vertex_ids[indices[i + 2]];
  871. result += (id0 != id1) & (id0 != id2) & (id1 != id2);
  872. }
  873. return result;
  874. }
// Assigns a dense cell index to every vertex: vertices with equal quantized position
// ids share a cell. Uses `table` (table_size slots) as scratch space. Returns the
// number of distinct cells.
static size_t fillVertexCells(unsigned int* table, size_t table_size, unsigned int* vertex_cells, const unsigned int* vertex_ids, size_t vertex_count)
{
	CellHasher hasher = {vertex_ids};

	// table entries hold vertex indices; ~0u marks an empty slot
	memset(table, -1, table_size * sizeof(unsigned int));

	size_t result = 0;

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int* entry = hashLookup2(table, table_size, hasher, unsigned(i), ~0u);

		if (*entry == ~0u)
		{
			// first vertex with this quantized id: allocate a fresh cell
			*entry = unsigned(i);

			vertex_cells[i] = unsigned(result++);
		}
		else
		{
			// duplicate id: reuse the cell assigned to the representative vertex
			vertex_cells[i] = vertex_cells[*entry];
		}
	}

	return result;
}
// Counts the number of distinct quantized position ids, using `table` (table_size slots)
// as scratch space for a hash set.
static size_t countVertexCells(unsigned int* table, size_t table_size, const unsigned int* vertex_ids, size_t vertex_count)
{
	IdHasher hasher;

	// ~0u marks an empty slot
	memset(table, -1, table_size * sizeof(unsigned int));

	size_t result = 0;

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int id = vertex_ids[i];
		unsigned int* entry = hashLookup2(table, table_size, hasher, id, ~0u);

		// count each id the first time it is seen
		result += (*entry == ~0u);
		*entry = id;
	}

	return result;
}
  909. static void fillCellQuadrics(Quadric* cell_quadrics, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* vertex_cells)
  910. {
  911. for (size_t i = 0; i < index_count; i += 3)
  912. {
  913. unsigned int i0 = indices[i + 0];
  914. unsigned int i1 = indices[i + 1];
  915. unsigned int i2 = indices[i + 2];
  916. unsigned int c0 = vertex_cells[i0];
  917. unsigned int c1 = vertex_cells[i1];
  918. unsigned int c2 = vertex_cells[i2];
  919. bool single_cell = (c0 == c1) & (c0 == c2);
  920. Quadric Q;
  921. quadricFromTriangle(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], single_cell ? 3.f : 1.f);
  922. if (single_cell)
  923. {
  924. quadricAdd(cell_quadrics[c0], Q);
  925. }
  926. else
  927. {
  928. quadricAdd(cell_quadrics[c0], Q);
  929. quadricAdd(cell_quadrics[c1], Q);
  930. quadricAdd(cell_quadrics[c2], Q);
  931. }
  932. }
  933. }
  934. static void fillCellQuadrics(Quadric* cell_quadrics, const Vector3* vertex_positions, size_t vertex_count, const unsigned int* vertex_cells)
  935. {
  936. for (size_t i = 0; i < vertex_count; ++i)
  937. {
  938. unsigned int c = vertex_cells[i];
  939. const Vector3& v = vertex_positions[i];
  940. Quadric Q;
  941. quadricFromPoint(Q, v.x, v.y, v.z, 1.f);
  942. quadricAdd(cell_quadrics[c], Q);
  943. }
  944. }
  945. static void fillCellRemap(unsigned int* cell_remap, float* cell_errors, size_t cell_count, const unsigned int* vertex_cells, const Quadric* cell_quadrics, const Vector3* vertex_positions, size_t vertex_count)
  946. {
  947. memset(cell_remap, -1, cell_count * sizeof(unsigned int));
  948. for (size_t i = 0; i < vertex_count; ++i)
  949. {
  950. unsigned int cell = vertex_cells[i];
  951. float error = quadricError(cell_quadrics[cell], vertex_positions[i]);
  952. if (cell_remap[cell] == ~0u || cell_errors[cell] > error)
  953. {
  954. cell_remap[cell] = unsigned(i);
  955. cell_errors[cell] = error;
  956. }
  957. }
  958. }
// Emits the deduplicated set of triangles that remain non-degenerate after snapping each
// vertex to its cell representative; writes them to destination and returns the output
// index count. `tritable` (tritable_size slots) is scratch space for the dedup hash set.
static size_t filterTriangles(unsigned int* destination, unsigned int* tritable, size_t tritable_size, const unsigned int* indices, size_t index_count, const unsigned int* vertex_cells, const unsigned int* cell_remap)
{
	// the hasher reads triangles out of destination, so candidates must be written before lookup
	TriangleHasher hasher = {destination};

	// ~0u marks an empty slot; occupied slots hold triangle indices into destination
	memset(tritable, -1, tritable_size * sizeof(unsigned int));

	size_t result = 0;

	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int c0 = vertex_cells[indices[i + 0]];
		unsigned int c1 = vertex_cells[indices[i + 1]];
		unsigned int c2 = vertex_cells[indices[i + 2]];

		// skip triangles that collapsed into one or two cells
		if (c0 != c1 && c0 != c2 && c1 != c2)
		{
			unsigned int a = cell_remap[c0];
			unsigned int b = cell_remap[c1];
			unsigned int c = cell_remap[c2];

			// rotate the triangle so the smallest index comes first while preserving winding;
			// this canonical form makes duplicate triangles compare equal in the hash table
			if (b < a && b < c)
			{
				unsigned int t = a;
				a = b, b = c, c = t;
			}
			else if (c < a && c < b)
			{
				unsigned int t = c;
				c = b, b = a, a = t;
			}

			// tentatively write the candidate so hasher can see it during lookup
			destination[result * 3 + 0] = a;
			destination[result * 3 + 1] = b;
			destination[result * 3 + 2] = c;

			unsigned int* entry = hashLookup2(tritable, tritable_size, hasher, unsigned(result), ~0u);

			// advance the write cursor only for triangles not emitted before;
			// duplicates are overwritten by the next candidate
			if (*entry == ~0u)
				*entry = unsigned(result++);
		}
	}

	return result * 3;
}
  994. static float interpolate(float y, float x0, float y0, float x1, float y1, float x2, float y2)
  995. {
  996. // three point interpolation from "revenge of interpolation search" paper
  997. float num = (y1 - y) * (x1 - x2) * (x1 - x0) * (y2 - y0);
  998. float den = (y2 - y) * (x1 - x2) * (y0 - y1) + (y0 - y) * (x1 - x0) * (y1 - y2);
  999. return x1 + num / den;
  1000. }
  1001. } // namespace meshopt
#ifndef NDEBUG
// Debug hooks: when set to caller-provided buffers (vertex_count elements each),
// meshopt_simplify copies the per-vertex classification and loop/loopback edge links
// into them before returning. Only available in non-NDEBUG builds.
unsigned char* meshopt_simplifyDebugKind = 0;
unsigned int* meshopt_simplifyDebugLoop = 0;
unsigned int* meshopt_simplifyDebugLoopBack = 0;
#endif
// Simplifies the mesh to approximately target_index_count indices (or until no collapse
// stays under target_error), writing the result into destination (which may alias indices).
// Returns the resulting index count; if out_result_error is non-NULL it receives the
// simplification error (sqrt of the maximal accepted quadric error).
size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* out_result_error)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride > 0 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(target_index_count <= index_count);

	meshopt_Allocator allocator;

	unsigned int* result = destination;

	// build adjacency information
	EdgeAdjacency adjacency = {};
	prepareEdgeAdjacency(adjacency, index_count, vertex_count, allocator);
	updateEdgeAdjacency(adjacency, indices, index_count, vertex_count, NULL);

	// build position remap that maps each vertex to the one with identical position
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	unsigned int* wedge = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, wedge, vertex_positions_data, vertex_count, vertex_positions_stride, allocator);

	// classify vertices; vertex kind determines collapse rules, see kCanCollapse
	unsigned char* vertex_kind = allocator.allocate<unsigned char>(vertex_count);
	unsigned int* loop = allocator.allocate<unsigned int>(vertex_count);
	unsigned int* loopback = allocator.allocate<unsigned int>(vertex_count);
	classifyVertices(vertex_kind, loop, loopback, vertex_count, adjacency, remap, wedge);

#if TRACE
	size_t unique_positions = 0;
	for (size_t i = 0; i < vertex_count; ++i)
		unique_positions += remap[i] == i;

	printf("position remap: %d vertices => %d positions\n", int(vertex_count), int(unique_positions));

	size_t kinds[Kind_Count] = {};
	for (size_t i = 0; i < vertex_count; ++i)
		kinds[vertex_kind[i]] += remap[i] == i;

	printf("kinds: manifold %d, border %d, seam %d, complex %d, locked %d\n",
	    int(kinds[Kind_Manifold]), int(kinds[Kind_Border]), int(kinds[Kind_Seam]), int(kinds[Kind_Complex]), int(kinds[Kind_Locked]));
#endif

	// normalize positions into a unit-scale space so errors are comparable across meshes
	Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
	rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride);

	// accumulate per-position quadrics from faces, plus extra edge quadrics that penalize
	// moving border/seam vertices off their boundary edges
	Quadric* vertex_quadrics = allocator.allocate<Quadric>(vertex_count);
	memset(vertex_quadrics, 0, vertex_count * sizeof(Quadric));

	fillFaceQuadrics(vertex_quadrics, indices, index_count, vertex_positions, remap);
	fillEdgeQuadrics(vertex_quadrics, indices, index_count, vertex_positions, remap, vertex_kind, loop, loopback);

	// simplification happens in place on the result buffer
	if (result != indices)
		memcpy(result, indices, index_count * sizeof(unsigned int));

#if TRACE
	size_t pass_count = 0;
#endif

	Collapse* edge_collapses = allocator.allocate<Collapse>(index_count);
	unsigned int* collapse_order = allocator.allocate<unsigned int>(index_count);
	unsigned int* collapse_remap = allocator.allocate<unsigned int>(vertex_count);
	unsigned char* collapse_locked = allocator.allocate<unsigned char>(vertex_count);

	size_t result_count = index_count;
	float result_error = 0;

	// target_error input is linear; we need to adjust it to match quadricError units
	float error_limit = target_error * target_error;

	// each iteration is one collapse pass: pick candidates, rank, sort, apply, compact
	while (result_count > target_index_count)
	{
		// note: throughout the simplification process adjacency structure reflects welded topology for result-in-progress
		updateEdgeAdjacency(adjacency, result, result_count, vertex_count, remap);

		size_t edge_collapse_count = pickEdgeCollapses(edge_collapses, result, result_count, remap, vertex_kind, loop);

		// no edges can be collapsed any more due to topology restrictions
		if (edge_collapse_count == 0)
			break;

		rankEdgeCollapses(edge_collapses, edge_collapse_count, vertex_positions, vertex_quadrics, remap);

#if TRACE > 1
		dumpEdgeCollapses(edge_collapses, edge_collapse_count, vertex_kind);
#endif

		sortEdgeCollapses(collapse_order, edge_collapses, edge_collapse_count);

		size_t triangle_collapse_goal = (result_count - target_index_count) / 3;

		// reset per-pass state: identity remap, nothing locked
		for (size_t i = 0; i < vertex_count; ++i)
			collapse_remap[i] = unsigned(i);

		memset(collapse_locked, 0, vertex_count);

#if TRACE
		printf("pass %d: ", int(pass_count++));
#endif

		size_t collapses = performEdgeCollapses(collapse_remap, collapse_locked, vertex_quadrics, edge_collapses, edge_collapse_count, collapse_order, remap, wedge, vertex_kind, vertex_positions, adjacency, triangle_collapse_goal, error_limit, result_error);

		// no edges can be collapsed any more due to hitting the error limit or triangle collapse limit
		if (collapses == 0)
			break;

		// propagate this pass's vertex moves into the boundary loop links and the index buffer
		remapEdgeLoops(loop, vertex_count, collapse_remap);
		remapEdgeLoops(loopback, vertex_count, collapse_remap);

		size_t new_count = remapIndexBuffer(result, result_count, collapse_remap);
		assert(new_count < result_count);

		result_count = new_count;
	}

#if TRACE
	printf("result: %d triangles, error: %e; total %d passes\n", int(result_count), sqrtf(result_error), int(pass_count));
#endif

#if TRACE > 1
	dumpLockedCollapses(result, result_count, vertex_kind);
#endif

#ifndef NDEBUG
	// export internal per-vertex data for debug visualization when the hooks are set
	if (meshopt_simplifyDebugKind)
		memcpy(meshopt_simplifyDebugKind, vertex_kind, vertex_count);

	if (meshopt_simplifyDebugLoop)
		memcpy(meshopt_simplifyDebugLoop, loop, vertex_count * sizeof(unsigned int));

	if (meshopt_simplifyDebugLoopBack)
		memcpy(meshopt_simplifyDebugLoopBack, loopback, vertex_count * sizeof(unsigned int));
#endif

	// result_error is quadratic; we need to remap it back to linear
	if (out_result_error)
		*out_result_error = sqrtf(result_error);

	return result_count;
}
// Sloppy simplification: clusters vertices on a uniform grid and keeps one representative
// per cell, discarding triangles that degenerate. Faster and topology-insensitive, but
// less precise than meshopt_simplify. Returns the output index count; if out_result_error
// is non-NULL it receives the sqrt of the maximal per-cell quadric error.
size_t meshopt_simplifySloppy(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* out_result_error)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride > 0 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(target_index_count <= index_count);

	// we expect to get ~2 triangles/vertex in the output
	size_t target_cell_count = target_index_count / 6;

	meshopt_Allocator allocator;

	// normalize positions so grid quantization is uniform across the mesh extents
	Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
	rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride);

	// find the optimal grid size using guided binary search
#if TRACE
	printf("source: %d vertices, %d triangles\n", int(vertex_count), int(index_count / 3));
	printf("target: %d cells, %d triangles\n", int(target_cell_count), int(target_index_count / 3));
#endif

	unsigned int* vertex_ids = allocator.allocate<unsigned int>(vertex_count);

	const int kInterpolationPasses = 5;

	// invariant: # of triangles in min_grid <= target_count
	// the minimum grid size is derived from target_error: a coarser grid would exceed it
	int min_grid = int(1.f / (target_error < 1e-3f ? 1e-3f : target_error));
	int max_grid = 1025;
	size_t min_triangles = 0;
	size_t max_triangles = index_count / 3;

	// when we're error-limited, we compute the triangle count for the min. size; this accelerates convergence and provides the correct answer when we can't use a larger grid
	if (min_grid > 1)
	{
		computeVertexIds(vertex_ids, vertex_positions, vertex_count, min_grid);
		min_triangles = countTriangles(vertex_ids, indices, index_count);
	}

	// instead of starting in the middle, let's guess as to what the answer might be! triangle count usually grows as a square of grid size...
	int next_grid_size = int(sqrtf(float(target_cell_count)) + 0.5f);

	for (int pass = 0; pass < 10 + kInterpolationPasses; ++pass)
	{
		// stop when the lower bound already meets the target or the bracket is exhausted
		if (min_triangles >= target_index_count / 3 || max_grid - min_grid <= 1)
			break;

		// we clamp the prediction of the grid size to make sure that the search converges
		int grid_size = next_grid_size;
		grid_size = (grid_size <= min_grid) ? min_grid + 1 : (grid_size >= max_grid) ? max_grid - 1 : grid_size;

		computeVertexIds(vertex_ids, vertex_positions, vertex_count, grid_size);
		size_t triangles = countTriangles(vertex_ids, indices, index_count);

#if TRACE
		printf("pass %d (%s): grid size %d, triangles %d, %s\n",
		    pass, (pass == 0) ? "guess" : (pass <= kInterpolationPasses) ? "lerp" : "binary",
		    grid_size, int(triangles),
		    (triangles <= target_index_count / 3) ? "under" : "over");
#endif

		// predict the grid size that hits the target triangle count from three samples
		float tip = interpolate(float(target_index_count / 3), float(min_grid), float(min_triangles), float(grid_size), float(triangles), float(max_grid), float(max_triangles));

		// shrink the search bracket around the target
		if (triangles <= target_index_count / 3)
		{
			min_grid = grid_size;
			min_triangles = triangles;
		}
		else
		{
			max_grid = grid_size;
			max_triangles = triangles;
		}

		// we start by using interpolation search - it usually converges faster
		// however, interpolation search has a worst case of O(N) so we switch to binary search after a few iterations which converges in O(logN)
		next_grid_size = (pass < kInterpolationPasses) ? int(tip + 0.5f) : (min_grid + max_grid) / 2;
	}

	// no grid size produced any surviving triangles within the error budget
	if (min_triangles == 0)
	{
		if (out_result_error)
			*out_result_error = 1.f;

		return 0;
	}

	// build vertex->cell association by mapping all vertices with the same quantized position to the same cell
	size_t table_size = hashBuckets2(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);

	unsigned int* vertex_cells = allocator.allocate<unsigned int>(vertex_count);

	computeVertexIds(vertex_ids, vertex_positions, vertex_count, min_grid);
	size_t cell_count = fillVertexCells(table, table_size, vertex_cells, vertex_ids, vertex_count);

	// build a quadric for each target cell
	Quadric* cell_quadrics = allocator.allocate<Quadric>(cell_count);
	memset(cell_quadrics, 0, cell_count * sizeof(Quadric));

	fillCellQuadrics(cell_quadrics, indices, index_count, vertex_positions, vertex_cells);

	// for each target cell, find the vertex with the minimal error
	unsigned int* cell_remap = allocator.allocate<unsigned int>(cell_count);
	float* cell_errors = allocator.allocate<float>(cell_count);

	fillCellRemap(cell_remap, cell_errors, cell_count, vertex_cells, cell_quadrics, vertex_positions, vertex_count);

	// compute error
	float result_error = 0.f;

	for (size_t i = 0; i < cell_count; ++i)
		result_error = result_error < cell_errors[i] ? cell_errors[i] : result_error;

	// collapse triangles!
	// note that we need to filter out triangles that we've already output because we very frequently generate redundant triangles between cells :(
	size_t tritable_size = hashBuckets2(min_triangles);
	unsigned int* tritable = allocator.allocate<unsigned int>(tritable_size);

	size_t write = filterTriangles(destination, tritable, tritable_size, indices, index_count, vertex_cells, cell_remap);

#if TRACE
	printf("result: %d cells, %d triangles (%d unfiltered), error %e\n", int(cell_count), int(write / 3), int(min_triangles), sqrtf(result_error));
#endif

	if (out_result_error)
		*out_result_error = sqrtf(result_error);

	return write;
}
// Point cloud simplification: clusters points on a uniform grid (searching for the grid
// size whose cell count best matches target_vertex_count) and keeps the lowest-error
// representative per cell. Writes the chosen vertex indices to destination and returns
// their count (<= target_vertex_count).
size_t meshopt_simplifyPoints(unsigned int* destination, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, size_t target_vertex_count)
{
	using namespace meshopt;

	assert(vertex_positions_stride > 0 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(target_vertex_count <= vertex_count);

	size_t target_cell_count = target_vertex_count;

	if (target_cell_count == 0)
		return 0;

	meshopt_Allocator allocator;

	// normalize positions so grid quantization is uniform across the cloud extents
	Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
	rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride);

	// find the optimal grid size using guided binary search
#if TRACE
	printf("source: %d vertices\n", int(vertex_count));
	printf("target: %d cells\n", int(target_cell_count));
#endif

	unsigned int* vertex_ids = allocator.allocate<unsigned int>(vertex_count);

	size_t table_size = hashBuckets2(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);

	const int kInterpolationPasses = 5;

	// invariant: # of vertices in min_grid <= target_count
	int min_grid = 0;
	int max_grid = 1025;
	size_t min_vertices = 0;
	size_t max_vertices = vertex_count;

	// instead of starting in the middle, let's guess as to what the answer might be! triangle count usually grows as a square of grid size...
	int next_grid_size = int(sqrtf(float(target_cell_count)) + 0.5f);

	for (int pass = 0; pass < 10 + kInterpolationPasses; ++pass)
	{
		assert(min_vertices < target_vertex_count);
		assert(max_grid - min_grid > 1);

		// we clamp the prediction of the grid size to make sure that the search converges
		int grid_size = next_grid_size;
		grid_size = (grid_size <= min_grid) ? min_grid + 1 : (grid_size >= max_grid) ? max_grid - 1 : grid_size;

		computeVertexIds(vertex_ids, vertex_positions, vertex_count, grid_size);
		size_t vertices = countVertexCells(table, table_size, vertex_ids, vertex_count);

#if TRACE
		printf("pass %d (%s): grid size %d, vertices %d, %s\n",
		    pass, (pass == 0) ? "guess" : (pass <= kInterpolationPasses) ? "lerp" : "binary",
		    grid_size, int(vertices),
		    (vertices <= target_vertex_count) ? "under" : "over");
#endif

		// predict the grid size that hits the target cell count from three samples
		float tip = interpolate(float(target_vertex_count), float(min_grid), float(min_vertices), float(grid_size), float(vertices), float(max_grid), float(max_vertices));

		// shrink the search bracket around the target
		if (vertices <= target_vertex_count)
		{
			min_grid = grid_size;
			min_vertices = vertices;
		}
		else
		{
			max_grid = grid_size;
			max_vertices = vertices;
		}

		if (vertices == target_vertex_count || max_grid - min_grid <= 1)
			break;

		// we start by using interpolation search - it usually converges faster
		// however, interpolation search has a worst case of O(N) so we switch to binary search after a few iterations which converges in O(logN)
		next_grid_size = (pass < kInterpolationPasses) ? int(tip + 0.5f) : (min_grid + max_grid) / 2;
	}

	if (min_vertices == 0)
		return 0;

	// build vertex->cell association by mapping all vertices with the same quantized position to the same cell
	unsigned int* vertex_cells = allocator.allocate<unsigned int>(vertex_count);

	computeVertexIds(vertex_ids, vertex_positions, vertex_count, min_grid);
	size_t cell_count = fillVertexCells(table, table_size, vertex_cells, vertex_ids, vertex_count);

	// build a quadric for each target cell
	Quadric* cell_quadrics = allocator.allocate<Quadric>(cell_count);
	memset(cell_quadrics, 0, cell_count * sizeof(Quadric));

	fillCellQuadrics(cell_quadrics, vertex_positions, vertex_count, vertex_cells);

	// for each target cell, find the vertex with the minimal error
	unsigned int* cell_remap = allocator.allocate<unsigned int>(cell_count);
	float* cell_errors = allocator.allocate<float>(cell_count);

	fillCellRemap(cell_remap, cell_errors, cell_count, vertex_cells, cell_quadrics, vertex_positions, vertex_count);

	// copy results to the output
	assert(cell_count <= target_vertex_count);
	memcpy(destination, cell_remap, sizeof(unsigned int) * cell_count);

#if TRACE
	printf("result: %d cells\n", int(cell_count));
#endif

	return cell_count;
}
  1288. float meshopt_simplifyScale(const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
  1289. {
  1290. using namespace meshopt;
  1291. assert(vertex_positions_stride > 0 && vertex_positions_stride <= 256);
  1292. assert(vertex_positions_stride % sizeof(float) == 0);
  1293. float extent = rescalePositions(NULL, vertex_positions, vertex_count, vertex_positions_stride);
  1294. return extent;
  1295. }