simplifier.cpp 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546
  1. // This file is part of meshoptimizer library; see meshoptimizer.h for version/license details
  2. #include "meshoptimizer.h"
  3. #include <assert.h>
  4. #include <float.h>
  5. #include <math.h>
  6. #include <string.h>
  7. #ifndef TRACE
  8. #define TRACE 0
  9. #endif
  10. #if TRACE
  11. #include <stdio.h>
  12. #endif
  13. // This work is based on:
  14. // Michael Garland and Paul S. Heckbert. Surface simplification using quadric error metrics. 1997
  15. // Michael Garland. Quadric-based polygonal surface simplification. 1999
  16. // Peter Lindstrom. Out-of-Core Simplification of Large Polygonal Models. 2000
  17. // Matthias Teschner, Bruno Heidelberger, Matthias Mueller, Danat Pomeranets, Markus Gross. Optimized Spatial Hashing for Collision Detection of Deformable Objects. 2003
  18. // Peter Van Sandt, Yannis Chronis, Jignesh M. Patel. Efficiently Searching In-Memory Sorted Arrays: Revenge of the Interpolation Search? 2019
  19. namespace meshopt
  20. {
// Directed half-edge adjacency in CSR (compressed sparse row) form:
// for vertex v, the outgoing edge targets are data[offsets[v] .. offsets[v] + counts[v]).
struct EdgeAdjacency
{
	unsigned int* counts;  // number of outgoing half-edges per vertex
	unsigned int* offsets; // start of each vertex's slice within data[]
	unsigned int* data;    // edge targets; one entry per index in the mesh
};
  27. static void buildEdgeAdjacency(EdgeAdjacency& adjacency, const unsigned int* indices, size_t index_count, size_t vertex_count, meshopt_Allocator& allocator)
  28. {
  29. size_t face_count = index_count / 3;
  30. // allocate arrays
  31. adjacency.counts = allocator.allocate<unsigned int>(vertex_count);
  32. adjacency.offsets = allocator.allocate<unsigned int>(vertex_count);
  33. adjacency.data = allocator.allocate<unsigned int>(index_count);
  34. // fill edge counts
  35. memset(adjacency.counts, 0, vertex_count * sizeof(unsigned int));
  36. for (size_t i = 0; i < index_count; ++i)
  37. {
  38. assert(indices[i] < vertex_count);
  39. adjacency.counts[indices[i]]++;
  40. }
  41. // fill offset table
  42. unsigned int offset = 0;
  43. for (size_t i = 0; i < vertex_count; ++i)
  44. {
  45. adjacency.offsets[i] = offset;
  46. offset += adjacency.counts[i];
  47. }
  48. assert(offset == index_count);
  49. // fill edge data
  50. for (size_t i = 0; i < face_count; ++i)
  51. {
  52. unsigned int a = indices[i * 3 + 0], b = indices[i * 3 + 1], c = indices[i * 3 + 2];
  53. adjacency.data[adjacency.offsets[a]++] = b;
  54. adjacency.data[adjacency.offsets[b]++] = c;
  55. adjacency.data[adjacency.offsets[c]++] = a;
  56. }
  57. // fix offsets that have been disturbed by the previous pass
  58. for (size_t i = 0; i < vertex_count; ++i)
  59. {
  60. assert(adjacency.offsets[i] >= adjacency.counts[i]);
  61. adjacency.offsets[i] -= adjacency.counts[i];
  62. }
  63. }
  64. struct PositionHasher
  65. {
  66. const float* vertex_positions;
  67. size_t vertex_stride_float;
  68. size_t hash(unsigned int index) const
  69. {
  70. // MurmurHash2
  71. const unsigned int m = 0x5bd1e995;
  72. const int r = 24;
  73. unsigned int h = 0;
  74. const unsigned int* key = reinterpret_cast<const unsigned int*>(vertex_positions + index * vertex_stride_float);
  75. for (size_t i = 0; i < 3; ++i)
  76. {
  77. unsigned int k = key[i];
  78. k *= m;
  79. k ^= k >> r;
  80. k *= m;
  81. h *= m;
  82. h ^= k;
  83. }
  84. return h;
  85. }
  86. bool equal(unsigned int lhs, unsigned int rhs) const
  87. {
  88. return memcmp(vertex_positions + lhs * vertex_stride_float, vertex_positions + rhs * vertex_stride_float, sizeof(float) * 3) == 0;
  89. }
  90. };
  91. static size_t hashBuckets2(size_t count)
  92. {
  93. size_t buckets = 1;
  94. while (buckets < count)
  95. buckets *= 2;
  96. return buckets;
  97. }
  98. template <typename T, typename Hash>
  99. static T* hashLookup2(T* table, size_t buckets, const Hash& hash, const T& key, const T& empty)
  100. {
  101. assert(buckets > 0);
  102. assert((buckets & (buckets - 1)) == 0);
  103. size_t hashmod = buckets - 1;
  104. size_t bucket = hash.hash(key) & hashmod;
  105. for (size_t probe = 0; probe <= hashmod; ++probe)
  106. {
  107. T& item = table[bucket];
  108. if (item == empty)
  109. return &item;
  110. if (hash.equal(item, key))
  111. return &item;
  112. // hash collision, quadratic probing
  113. bucket = (bucket + probe + 1) & hashmod;
  114. }
  115. assert(false && "Hash table is full"); // unreachable
  116. return 0;
  117. }
  118. static void buildPositionRemap(unsigned int* remap, unsigned int* wedge, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, meshopt_Allocator& allocator)
  119. {
  120. PositionHasher hasher = {vertex_positions_data, vertex_positions_stride / sizeof(float)};
  121. size_t table_size = hashBuckets2(vertex_count);
  122. unsigned int* table = allocator.allocate<unsigned int>(table_size);
  123. memset(table, -1, table_size * sizeof(unsigned int));
  124. // build forward remap: for each vertex, which other (canonical) vertex does it map to?
  125. // we use position equivalence for this, and remap vertices to other existing vertices
  126. for (size_t i = 0; i < vertex_count; ++i)
  127. {
  128. unsigned int index = unsigned(i);
  129. unsigned int* entry = hashLookup2(table, table_size, hasher, index, ~0u);
  130. if (*entry == ~0u)
  131. *entry = index;
  132. remap[index] = *entry;
  133. }
  134. // build wedge table: for each vertex, which other vertex is the next wedge that also maps to the same vertex?
  135. // entries in table form a (cyclic) wedge loop per vertex; for manifold vertices, wedge[i] == remap[i] == i
  136. for (size_t i = 0; i < vertex_count; ++i)
  137. wedge[i] = unsigned(i);
  138. for (size_t i = 0; i < vertex_count; ++i)
  139. if (remap[i] != i)
  140. {
  141. unsigned int r = remap[i];
  142. wedge[i] = wedge[r];
  143. wedge[r] = unsigned(i);
  144. }
  145. }
// Vertex classification; used to decide which collapses are legal (see kCanCollapse).
enum VertexKind
{
	Kind_Manifold, // not on an attribute seam, not on any boundary
	Kind_Border,   // not on an attribute seam, has exactly two open edges
	Kind_Seam,     // on an attribute seam with exactly two attribute seam edges
	Kind_Complex,  // none of the above; these vertices can move as long as all wedges move to the target vertex
	Kind_Locked,   // none of the above; these vertices can't move

	Kind_Count
};
// manifold vertices can collapse onto anything
// border/seam vertices can only be collapsed onto border/seam respectively
// complex vertices can collapse onto complex/locked
// a rule of thumb is that collapsing kind A into kind B preserves the kind B in the target vertex
// for example, while we could collapse Complex into Manifold, this would mean the target vertex isn't Manifold anymore
// note: rows are indexed by the source vertex kind, columns by the target vertex kind
const unsigned char kCanCollapse[Kind_Count][Kind_Count] = {
	{1, 1, 1, 1, 1},
	{0, 1, 0, 0, 0},
	{0, 0, 1, 0, 0},
	{0, 0, 0, 1, 1},
	{0, 0, 0, 0, 0},
};

// if a vertex is manifold or seam, adjoining edges are guaranteed to have an opposite edge
// note that for seam edges, the opposite edge isn't present in the attribute-based topology
// but is present if you consider a position-only mesh variant
const unsigned char kHasOpposite[Kind_Count][Kind_Count] = {
	{1, 1, 1, 0, 1},
	{1, 0, 1, 0, 0},
	{1, 1, 1, 0, 1},
	{0, 0, 0, 0, 0},
	{1, 0, 1, 0, 0},
};
  177. static bool hasEdge(const EdgeAdjacency& adjacency, unsigned int a, unsigned int b)
  178. {
  179. unsigned int count = adjacency.counts[a];
  180. const unsigned int* data = adjacency.data + adjacency.offsets[a];
  181. for (size_t i = 0; i < count; ++i)
  182. if (data[i] == b)
  183. return true;
  184. return false;
  185. }
  186. static unsigned int findWedgeEdge(const EdgeAdjacency& adjacency, const unsigned int* wedge, unsigned int a, unsigned int b)
  187. {
  188. unsigned int v = a;
  189. do
  190. {
  191. if (hasEdge(adjacency, v, b))
  192. return v;
  193. v = wedge[v];
  194. } while (v != a);
  195. return ~0u;
  196. }
  197. static size_t countOpenEdges(const EdgeAdjacency& adjacency, unsigned int vertex, unsigned int* last = 0)
  198. {
  199. size_t result = 0;
  200. unsigned int count = adjacency.counts[vertex];
  201. const unsigned int* data = adjacency.data + adjacency.offsets[vertex];
  202. for (size_t i = 0; i < count; ++i)
  203. if (!hasEdge(adjacency, data[i], vertex))
  204. {
  205. result++;
  206. if (last)
  207. *last = data[i];
  208. }
  209. return result;
  210. }
// Classifies each vertex into a VertexKind (written to result[]) and fills
// loop[] with the next border/seam vertex along the open edge loop (~0u when
// not applicable). Non-canonical vertices (remap[i] != i) inherit the kind of
// their canonical vertex; canonical vertices are classified from their wedge
// structure and open-edge counts.
static void classifyVertices(unsigned char* result, unsigned int* loop, size_t vertex_count, const EdgeAdjacency& adjacency, const unsigned int* remap, const unsigned int* wedge)
{
	for (size_t i = 0; i < vertex_count; ++i)
		loop[i] = ~0u;

#if TRACE
	size_t lockedstats[4] = {};
#define TRACELOCKED(i) lockedstats[i]++;
#else
#define TRACELOCKED(i) (void)0
#endif

	for (size_t i = 0; i < vertex_count; ++i)
	{
		if (remap[i] == i)
		{
			if (wedge[i] == i)
			{
				// no attribute seam, need to check if it's manifold
				unsigned int v = 0;
				size_t edges = countOpenEdges(adjacency, unsigned(i), &v);

				// note: we classify any vertices with no open edges as manifold
				// this is technically incorrect - if 4 triangles share an edge, we'll classify vertices as manifold
				// it's unclear if this is a problem in practice
				// also note that we classify vertices as border if they have *one* open edge, not two
				// this is because we only have half-edges - so a border vertex would have one incoming and one outgoing edge
				if (edges == 0)
				{
					result[i] = Kind_Manifold;
				}
				else if (edges == 1)
				{
					result[i] = Kind_Border;
					loop[i] = v;
				}
				else
				{
					result[i] = Kind_Locked;
					TRACELOCKED(0);
				}
			}
			else if (wedge[wedge[i]] == i)
			{
				// attribute seam (exactly two wedges); need to distinguish between Seam and Locked
				unsigned int a = 0;
				size_t a_count = countOpenEdges(adjacency, unsigned(i), &a);
				unsigned int b = 0;
				size_t b_count = countOpenEdges(adjacency, wedge[i], &b);

				// seam should have one open half-edge for each vertex, and the edges need to "connect" - point to the same vertex post-remap
				if (a_count == 1 && b_count == 1)
				{
					unsigned int ao = findWedgeEdge(adjacency, wedge, a, wedge[i]);
					unsigned int bo = findWedgeEdge(adjacency, wedge, b, unsigned(i));

					if (ao != ~0u && bo != ~0u)
					{
						result[i] = Kind_Seam;

						// record the open-edge targets for both sides of the seam
						loop[i] = a;
						loop[wedge[i]] = b;
					}
					else
					{
						result[i] = Kind_Locked;
						TRACELOCKED(1);
					}
				}
				else
				{
					result[i] = Kind_Locked;
					TRACELOCKED(2);
				}
			}
			else
			{
				// more than one vertex maps to this one; we don't have classification available
				result[i] = Kind_Locked;
				TRACELOCKED(3);
			}
		}
		else
		{
			// non-canonical vertex: copy the already-computed kind of the canonical vertex
			assert(remap[i] < i);

			result[i] = result[remap[i]];
		}
	}

#if TRACE
	printf("locked: many open edges %d, disconnected seam %d, many seam edges %d, many wedges %d\n",
	    int(lockedstats[0]), int(lockedstats[1]), int(lockedstats[2]), int(lockedstats[3]));
#endif
}
// Plain 3-component vector used for (rescaled) vertex positions.
struct Vector3
{
	float x, y, z;
};
  302. static void rescalePositions(Vector3* result, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride)
  303. {
  304. size_t vertex_stride_float = vertex_positions_stride / sizeof(float);
  305. float minv[3] = {FLT_MAX, FLT_MAX, FLT_MAX};
  306. float maxv[3] = {-FLT_MAX, -FLT_MAX, -FLT_MAX};
  307. for (size_t i = 0; i < vertex_count; ++i)
  308. {
  309. const float* v = vertex_positions_data + i * vertex_stride_float;
  310. result[i].x = v[0];
  311. result[i].y = v[1];
  312. result[i].z = v[2];
  313. for (int j = 0; j < 3; ++j)
  314. {
  315. float vj = v[j];
  316. minv[j] = minv[j] > vj ? vj : minv[j];
  317. maxv[j] = maxv[j] < vj ? vj : maxv[j];
  318. }
  319. }
  320. float extent = 0.f;
  321. extent = (maxv[0] - minv[0]) < extent ? extent : (maxv[0] - minv[0]);
  322. extent = (maxv[1] - minv[1]) < extent ? extent : (maxv[1] - minv[1]);
  323. extent = (maxv[2] - minv[2]) < extent ? extent : (maxv[2] - minv[2]);
  324. float scale = extent == 0 ? 0.f : 1.f / extent;
  325. for (size_t i = 0; i < vertex_count; ++i)
  326. {
  327. result[i].x = (result[i].x - minv[0]) * scale;
  328. result[i].y = (result[i].y - minv[1]) * scale;
  329. result[i].z = (result[i].z - minv[2]) * scale;
  330. }
  331. }
// Weighted symmetric error quadric (Garland-Heckbert style):
// stores the symmetric matrix A, linear term b, constant c and accumulated weight w.
struct Quadric
{
	float a00, a11, a22; // diagonal of the symmetric matrix A
	float a10, a20, a21; // off-diagonal entries of A
	float b0, b1, b2, c; // linear term and constant
	float w;             // accumulated weight; quadricError divides by it
};
// Candidate edge collapse v0 -> v1.
// The union member in use changes across the pipeline: bidi is set when the
// collapse is picked, error is written during ranking, and errorui reads the
// same bits as an integer sort key.
struct Collapse
{
	unsigned int v0;
	unsigned int v1;

	union {
		unsigned int bidi;    // 1 if the edge may be collapsed in either direction
		float error;          // collapse error, filled in by rankEdgeCollapses
		unsigned int errorui; // bit pattern of error, used by sortEdgeCollapses
	};
};
  349. static float normalize(Vector3& v)
  350. {
  351. float length = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
  352. if (length > 0)
  353. {
  354. v.x /= length;
  355. v.y /= length;
  356. v.z /= length;
  357. }
  358. return length;
  359. }
  360. static void quadricAdd(Quadric& Q, const Quadric& R)
  361. {
  362. Q.a00 += R.a00;
  363. Q.a11 += R.a11;
  364. Q.a22 += R.a22;
  365. Q.a10 += R.a10;
  366. Q.a20 += R.a20;
  367. Q.a21 += R.a21;
  368. Q.b0 += R.b0;
  369. Q.b1 += R.b1;
  370. Q.b2 += R.b2;
  371. Q.c += R.c;
  372. Q.w += R.w;
  373. }
// Evaluates the quadric at point v, returning |v^T A v + 2 b^T v + c| / w
// (0 when the quadric has zero accumulated weight).
static float quadricError(const Quadric& Q, const Vector3& v)
{
	// accumulate the three components of (A v + b), interleaved to expose ILP
	float rx = Q.b0;
	float ry = Q.b1;
	float rz = Q.b2;

	rx += Q.a10 * v.y;
	ry += Q.a21 * v.z;
	rz += Q.a20 * v.x;

	rx *= 2;
	ry *= 2;
	rz *= 2;

	rx += Q.a00 * v.x;
	ry += Q.a11 * v.y;
	rz += Q.a22 * v.z;

	float r = Q.c;
	r += rx * v.x;
	r += ry * v.y;
	r += rz * v.z;

	// normalize by accumulated weight; fabsf guards against tiny negative
	// values produced by floating point rounding
	float s = Q.w == 0.f ? 0.f : 1.f / Q.w;

	return fabsf(r) * s;
}
  395. static void quadricFromPlane(Quadric& Q, float a, float b, float c, float d, float w)
  396. {
  397. float aw = a * w;
  398. float bw = b * w;
  399. float cw = c * w;
  400. float dw = d * w;
  401. Q.a00 = a * aw;
  402. Q.a11 = b * bw;
  403. Q.a22 = c * cw;
  404. Q.a10 = a * bw;
  405. Q.a20 = a * cw;
  406. Q.a21 = b * cw;
  407. Q.b0 = a * dw;
  408. Q.b1 = b * dw;
  409. Q.b2 = c * dw;
  410. Q.c = d * dw;
  411. Q.w = w;
  412. }
  413. static void quadricFromPoint(Quadric& Q, float x, float y, float z, float w)
  414. {
  415. // we need to encode (x - X) ^ 2 + (y - Y)^2 + (z - Z)^2 into the quadric
  416. Q.a00 = w;
  417. Q.a11 = w;
  418. Q.a22 = w;
  419. Q.a10 = 0.f;
  420. Q.a20 = 0.f;
  421. Q.a21 = 0.f;
  422. Q.b0 = -2.f * x * w;
  423. Q.b1 = -2.f * y * w;
  424. Q.b2 = -2.f * z * w;
  425. Q.c = (x * x + y * y + z * z) * w;
  426. Q.w = w;
  427. }
// Builds a plane quadric from triangle (p0, p1, p2), weighted by the triangle's
// area (via sqrtf) times the caller-provided weight.
static void quadricFromTriangle(Quadric& Q, const Vector3& p0, const Vector3& p1, const Vector3& p2, float weight)
{
	Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};
	Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};

	// normal = cross(p1 - p0, p2 - p0)
	Vector3 normal = {p10.y * p20.z - p10.z * p20.y, p10.z * p20.x - p10.x * p20.z, p10.x * p20.y - p10.y * p20.x};
	float area = normalize(normal);

	// plane offset d = -dot(normal, p0)
	float distance = normal.x * p0.x + normal.y * p0.y + normal.z * p0.z;

	// we use sqrtf(area) so that the error is scaled linearly; this tends to improve silhouettes
	quadricFromPlane(Q, normal.x, normal.y, normal.z, -distance, sqrtf(area) * weight);
}
// Builds a quadric that penalizes movement away from edge p0-p1, using a plane
// through the edge perpendicular to the triangle (its normal is the altitude
// direction from p2), weighted by the edge length times the caller's weight.
static void quadricFromTriangleEdge(Quadric& Q, const Vector3& p0, const Vector3& p1, const Vector3& p2, float weight)
{
	Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};
	float length = normalize(p10);

	// p20p = length of projection of p2-p0 onto normalize(p1 - p0)
	Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};
	float p20p = p20.x * p10.x + p20.y * p10.y + p20.z * p10.z;

	// normal = altitude of triangle from point p2 onto edge p1-p0
	Vector3 normal = {p20.x - p10.x * p20p, p20.y - p10.y * p20p, p20.z - p10.z * p20p};
	normalize(normal);

	float distance = normal.x * p0.x + normal.y * p0.y + normal.z * p0.z;

	// note: the weight is scaled linearly with edge length; this has to match the triangle weight
	quadricFromPlane(Q, normal.x, normal.y, normal.z, -distance, length * weight);
}
  453. static void fillFaceQuadrics(Quadric* vertex_quadrics, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* remap)
  454. {
  455. for (size_t i = 0; i < index_count; i += 3)
  456. {
  457. unsigned int i0 = indices[i + 0];
  458. unsigned int i1 = indices[i + 1];
  459. unsigned int i2 = indices[i + 2];
  460. Quadric Q;
  461. quadricFromTriangle(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], 1.f);
  462. quadricAdd(vertex_quadrics[remap[i0]], Q);
  463. quadricAdd(vertex_quadrics[remap[i1]], Q);
  464. quadricAdd(vertex_quadrics[remap[i2]], Q);
  465. }
  466. }
  467. static void fillEdgeQuadrics(Quadric* vertex_quadrics, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* remap, const unsigned char* vertex_kind, const unsigned int* loop)
  468. {
  469. for (size_t i = 0; i < index_count; i += 3)
  470. {
  471. static const int next[3] = {1, 2, 0};
  472. for (int e = 0; e < 3; ++e)
  473. {
  474. unsigned int i0 = indices[i + e];
  475. unsigned int i1 = indices[i + next[e]];
  476. unsigned char k0 = vertex_kind[i0];
  477. unsigned char k1 = vertex_kind[i1];
  478. // check that i0 and i1 are border/seam and are on the same edge loop
  479. // loop[] tracks half edges so we only need to check i0->i1
  480. if (k0 != k1 || (k0 != Kind_Border && k0 != Kind_Seam) || loop[i0] != i1)
  481. continue;
  482. unsigned int i2 = indices[i + next[next[e]]];
  483. // we try hard to maintain border edge geometry; seam edges can move more freely
  484. // due to topological restrictions on collapses, seam quadrics slightly improves collapse structure but aren't critical
  485. const float kEdgeWeightSeam = 1.f;
  486. const float kEdgeWeightBorder = 10.f;
  487. float edgeWeight = (k0 == Kind_Seam) ? kEdgeWeightSeam : kEdgeWeightBorder;
  488. Quadric Q;
  489. quadricFromTriangleEdge(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], edgeWeight);
  490. quadricAdd(vertex_quadrics[remap[i0]], Q);
  491. quadricAdd(vertex_quadrics[remap[i1]], Q);
  492. }
  493. }
  494. }
// Enumerates candidate edge collapses from the index buffer into collapses[]
// (caller must provide room for up to index_count entries) and returns the
// number written. Each candidate stores v0 -> v1 plus a bidi flag saying
// whether the opposite direction is also legal; errors are filled in later.
static size_t pickEdgeCollapses(Collapse* collapses, const unsigned int* indices, size_t index_count, const unsigned int* remap, const unsigned char* vertex_kind, const unsigned int* loop)
{
	size_t collapse_count = 0;

	for (size_t i = 0; i < index_count; i += 3)
	{
		static const int next[3] = {1, 2, 0};

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];

			// this can happen either when input has a zero-length edge, or when we perform collapses for complex
			// topology w/seams and collapse a manifold vertex that connects to both wedges onto one of them
			// we leave edges like this alone since they may be important for preserving mesh integrity
			if (remap[i0] == remap[i1])
				continue;

			unsigned char k0 = vertex_kind[i0];
			unsigned char k1 = vertex_kind[i1];

			// the edge has to be collapsible in at least one direction
			if (!(kCanCollapse[k0][k1] | kCanCollapse[k1][k0]))
				continue;

			// manifold and seam edges should occur twice (i0->i1 and i1->i0) - skip redundant edges
			if (kHasOpposite[k0][k1] && remap[i1] > remap[i0])
				continue;

			// two vertices are on a border or a seam, but there's no direct edge between them
			// this indicates that they belong to two different edge loops and we should not collapse this edge
			// loop[] tracks half edges so we only need to check i0->i1
			if (k0 == k1 && (k0 == Kind_Border || k0 == Kind_Seam) && loop[i0] != i1)
				continue;

			// edge can be collapsed in either direction - we will pick the one with minimum error
			// note: we evaluate error later during collapse ranking, here we just tag the edge as bidirectional
			if (kCanCollapse[k0][k1] & kCanCollapse[k1][k0])
			{
				Collapse c = {i0, i1, {/* bidi= */ 1}};

				collapses[collapse_count++] = c;
			}
			else
			{
				// edge can only be collapsed in one direction
				unsigned int e0 = kCanCollapse[k0][k1] ? i0 : i1;
				unsigned int e1 = kCanCollapse[k0][k1] ? i1 : i0;

				Collapse c = {e0, e1, {/* bidi= */ 0}};

				collapses[collapse_count++] = c;
			}
		}
	}

	return collapse_count;
}
  542. static void rankEdgeCollapses(Collapse* collapses, size_t collapse_count, const Vector3* vertex_positions, const Quadric* vertex_quadrics, const unsigned int* remap)
  543. {
  544. for (size_t i = 0; i < collapse_count; ++i)
  545. {
  546. Collapse& c = collapses[i];
  547. unsigned int i0 = c.v0;
  548. unsigned int i1 = c.v1;
  549. // most edges are bidirectional which means we need to evaluate errors for two collapses
  550. // to keep this code branchless we just use the same edge for unidirectional edges
  551. unsigned int j0 = c.bidi ? i1 : i0;
  552. unsigned int j1 = c.bidi ? i0 : i1;
  553. const Quadric& qi = vertex_quadrics[remap[i0]];
  554. const Quadric& qj = vertex_quadrics[remap[j0]];
  555. float ei = quadricError(qi, vertex_positions[i1]);
  556. float ej = quadricError(qj, vertex_positions[j1]);
  557. // pick edge direction with minimal error
  558. c.v0 = ei <= ej ? i0 : j0;
  559. c.v1 = ei <= ej ? i1 : j1;
  560. c.error = ei <= ej ? ei : ej;
  561. }
  562. }
#if TRACE > 1
// Debug-only: prints, for every (source kind, target kind) pair, how many
// collapse candidates exist and the minimum error among them.
static void dumpEdgeCollapses(const Collapse* collapses, size_t collapse_count, const unsigned char* vertex_kind)
{
	size_t ckinds[Kind_Count][Kind_Count] = {};
	float cerrors[Kind_Count][Kind_Count] = {};

	for (int k0 = 0; k0 < Kind_Count; ++k0)
		for (int k1 = 0; k1 < Kind_Count; ++k1)
			cerrors[k0][k1] = FLT_MAX;

	for (size_t i = 0; i < collapse_count; ++i)
	{
		unsigned int i0 = collapses[i].v0;
		unsigned int i1 = collapses[i].v1;

		unsigned char k0 = vertex_kind[i0];
		unsigned char k1 = vertex_kind[i1];

		ckinds[k0][k1]++;
		cerrors[k0][k1] = (collapses[i].error < cerrors[k0][k1]) ? collapses[i].error : cerrors[k0][k1];
	}

	for (int k0 = 0; k0 < Kind_Count; ++k0)
		for (int k1 = 0; k1 < Kind_Count; ++k1)
			if (ckinds[k0][k1])
				printf("collapses %d -> %d: %d, min error %e\n", k0, k1, int(ckinds[k0][k1]), cerrors[k0][k1]);
}

// Debug-only: prints, per kind pair, the number of half-edges that cannot be
// collapsed in either direction according to kCanCollapse.
static void dumpLockedCollapses(const unsigned int* indices, size_t index_count, const unsigned char* vertex_kind)
{
	size_t locked_collapses[Kind_Count][Kind_Count] = {};

	for (size_t i = 0; i < index_count; i += 3)
	{
		static const int next[3] = {1, 2, 0};

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];

			unsigned char k0 = vertex_kind[i0];
			unsigned char k1 = vertex_kind[i1];

			locked_collapses[k0][k1] += !kCanCollapse[k0][k1] && !kCanCollapse[k1][k0];
		}
	}

	for (int k0 = 0; k0 < Kind_Count; ++k0)
		for (int k1 = 0; k1 < Kind_Count; ++k1)
			if (locked_collapses[k0][k1])
				printf("locked collapses %d -> %d: %d\n", k0, k1, int(locked_collapses[k0][k1]));
}
#endif
  606. static void sortEdgeCollapses(unsigned int* sort_order, const Collapse* collapses, size_t collapse_count)
  607. {
  608. const int sort_bits = 11;
  609. // fill histogram for counting sort
  610. unsigned int histogram[1 << sort_bits];
  611. memset(histogram, 0, sizeof(histogram));
  612. for (size_t i = 0; i < collapse_count; ++i)
  613. {
  614. // skip sign bit since error is non-negative
  615. unsigned int key = (collapses[i].errorui << 1) >> (32 - sort_bits);
  616. histogram[key]++;
  617. }
  618. // compute offsets based on histogram data
  619. size_t histogram_sum = 0;
  620. for (size_t i = 0; i < 1 << sort_bits; ++i)
  621. {
  622. size_t count = histogram[i];
  623. histogram[i] = unsigned(histogram_sum);
  624. histogram_sum += count;
  625. }
  626. assert(histogram_sum == collapse_count);
  627. // compute sort order based on offsets
  628. for (size_t i = 0; i < collapse_count; ++i)
  629. {
  630. // skip sign bit since error is non-negative
  631. unsigned int key = (collapses[i].errorui << 1) >> (32 - sort_bits);
  632. sort_order[histogram[key]++] = unsigned(i);
  633. }
  634. }
// Applies collapses in sorted (ascending error) order until an error limit or
// the triangle collapse goal is reached; returns the number of edge collapses
// performed. collapse_remap is updated to redirect moved vertices (including
// all wedges for Complex vertices and the seam pair for Seam vertices), and
// collapse_locked marks canonical vertices touched this pass so they are not
// moved twice.
static size_t performEdgeCollapses(unsigned int* collapse_remap, unsigned char* collapse_locked, Quadric* vertex_quadrics, const Collapse* collapses, size_t collapse_count, const unsigned int* collapse_order, const unsigned int* remap, const unsigned int* wedge, const unsigned char* vertex_kind, size_t triangle_collapse_goal, float error_goal, float error_limit)
{
	size_t edge_collapses = 0;
	size_t triangle_collapses = 0;

	for (size_t i = 0; i < collapse_count; ++i)
	{
		const Collapse& c = collapses[collapse_order[i]];

		// collapses are visited in ascending error order, so once a limit is hit
		// no later collapse can qualify either
		if (c.error > error_limit)
			break;

		// past the error goal, only continue while we are far from the triangle goal
		if (c.error > error_goal && triangle_collapses > triangle_collapse_goal / 10)
			break;

		if (triangle_collapses >= triangle_collapse_goal)
			break;

		unsigned int i0 = c.v0;
		unsigned int i1 = c.v1;

		unsigned int r0 = remap[i0];
		unsigned int r1 = remap[i1];

		// we don't collapse vertices that had source or target vertex involved in a collapse
		// it's important to not move the vertices twice since it complicates the tracking/remapping logic
		// it's important to not move other vertices towards a moved vertex to preserve error since we don't re-rank collapses mid-pass
		if (collapse_locked[r0] | collapse_locked[r1])
			continue;

		assert(collapse_remap[r0] == r0);
		assert(collapse_remap[r1] == r1);

		// fold the source vertex's accumulated error into the target
		quadricAdd(vertex_quadrics[r1], vertex_quadrics[r0]);

		if (vertex_kind[i0] == Kind_Complex)
		{
			// complex vertices must move all their wedges to the target at once
			unsigned int v = i0;

			do
			{
				collapse_remap[v] = r1;
				v = wedge[v];
			} while (v != i0);
		}
		else if (vertex_kind[i0] == Kind_Seam)
		{
			// remap v0 to v1 and seam pair of v0 to seam pair of v1
			unsigned int s0 = wedge[i0];
			unsigned int s1 = wedge[i1];

			assert(s0 != i0 && s1 != i1);
			assert(wedge[s0] == i0 && wedge[s1] == i1);

			collapse_remap[i0] = i1;
			collapse_remap[s0] = s1;
		}
		else
		{
			assert(wedge[i0] == i0);

			collapse_remap[i0] = i1;
		}

		collapse_locked[r0] = 1;
		collapse_locked[r1] = 1;

		// border edges collapse 1 triangle, other edges collapse 2 or more
		triangle_collapses += (vertex_kind[i0] == Kind_Border) ? 1 : 2;
		edge_collapses++;
	}

	return edge_collapses;
}
  692. static size_t remapIndexBuffer(unsigned int* indices, size_t index_count, const unsigned int* collapse_remap)
  693. {
  694. size_t write = 0;
  695. for (size_t i = 0; i < index_count; i += 3)
  696. {
  697. unsigned int v0 = collapse_remap[indices[i + 0]];
  698. unsigned int v1 = collapse_remap[indices[i + 1]];
  699. unsigned int v2 = collapse_remap[indices[i + 2]];
  700. // we never move the vertex twice during a single pass
  701. assert(collapse_remap[v0] == v0);
  702. assert(collapse_remap[v1] == v1);
  703. assert(collapse_remap[v2] == v2);
  704. if (v0 != v1 && v0 != v2 && v1 != v2)
  705. {
  706. indices[write + 0] = v0;
  707. indices[write + 1] = v1;
  708. indices[write + 2] = v2;
  709. write += 3;
  710. }
  711. }
  712. return write;
  713. }
  714. static void remapEdgeLoops(unsigned int* loop, size_t vertex_count, const unsigned int* collapse_remap)
  715. {
  716. for (size_t i = 0; i < vertex_count; ++i)
  717. {
  718. if (loop[i] != ~0u)
  719. {
  720. unsigned int l = loop[i];
  721. unsigned int r = collapse_remap[l];
  722. // i == r is a special case when the seam edge is collapsed in a direction opposite to where loop goes
  723. loop[i] = (i == r) ? loop[l] : r;
  724. }
  725. }
  726. }
  727. struct CellHasher
  728. {
  729. const unsigned int* vertex_ids;
  730. size_t hash(unsigned int i) const
  731. {
  732. unsigned int h = vertex_ids[i];
  733. // MurmurHash2 finalizer
  734. h ^= h >> 13;
  735. h *= 0x5bd1e995;
  736. h ^= h >> 15;
  737. return h;
  738. }
  739. bool equal(unsigned int lhs, unsigned int rhs) const
  740. {
  741. return vertex_ids[lhs] == vertex_ids[rhs];
  742. }
  743. };
  744. struct IdHasher
  745. {
  746. size_t hash(unsigned int id) const
  747. {
  748. unsigned int h = id;
  749. // MurmurHash2 finalizer
  750. h ^= h >> 13;
  751. h *= 0x5bd1e995;
  752. h ^= h >> 15;
  753. return h;
  754. }
  755. bool equal(unsigned int lhs, unsigned int rhs) const
  756. {
  757. return lhs == rhs;
  758. }
  759. };
  760. struct TriangleHasher
  761. {
  762. unsigned int* indices;
  763. size_t hash(unsigned int i) const
  764. {
  765. const unsigned int* tri = indices + i * 3;
  766. // Optimized Spatial Hashing for Collision Detection of Deformable Objects
  767. return (tri[0] * 73856093) ^ (tri[1] * 19349663) ^ (tri[2] * 83492791);
  768. }
  769. bool equal(unsigned int lhs, unsigned int rhs) const
  770. {
  771. const unsigned int* lt = indices + lhs * 3;
  772. const unsigned int* rt = indices + rhs * 3;
  773. return lt[0] == rt[0] && lt[1] == rt[1] && lt[2] == rt[2];
  774. }
  775. };
  776. static void computeVertexIds(unsigned int* vertex_ids, const Vector3* vertex_positions, size_t vertex_count, int grid_size)
  777. {
  778. assert(grid_size >= 1 && grid_size <= 1024);
  779. float cell_scale = float(grid_size - 1);
  780. for (size_t i = 0; i < vertex_count; ++i)
  781. {
  782. const Vector3& v = vertex_positions[i];
  783. int xi = int(v.x * cell_scale + 0.5f);
  784. int yi = int(v.y * cell_scale + 0.5f);
  785. int zi = int(v.z * cell_scale + 0.5f);
  786. vertex_ids[i] = (xi << 20) | (yi << 10) | zi;
  787. }
  788. }
  789. static size_t countTriangles(const unsigned int* vertex_ids, const unsigned int* indices, size_t index_count)
  790. {
  791. size_t result = 0;
  792. for (size_t i = 0; i < index_count; i += 3)
  793. {
  794. unsigned int id0 = vertex_ids[indices[i + 0]];
  795. unsigned int id1 = vertex_ids[indices[i + 1]];
  796. unsigned int id2 = vertex_ids[indices[i + 2]];
  797. result += (id0 != id1) & (id0 != id2) & (id1 != id2);
  798. }
  799. return result;
  800. }
  801. static size_t fillVertexCells(unsigned int* table, size_t table_size, unsigned int* vertex_cells, const unsigned int* vertex_ids, size_t vertex_count)
  802. {
  803. CellHasher hasher = {vertex_ids};
  804. memset(table, -1, table_size * sizeof(unsigned int));
  805. size_t result = 0;
  806. for (size_t i = 0; i < vertex_count; ++i)
  807. {
  808. unsigned int* entry = hashLookup2(table, table_size, hasher, unsigned(i), ~0u);
  809. if (*entry == ~0u)
  810. {
  811. *entry = unsigned(i);
  812. vertex_cells[i] = unsigned(result++);
  813. }
  814. else
  815. {
  816. vertex_cells[i] = vertex_cells[*entry];
  817. }
  818. }
  819. return result;
  820. }
  821. static size_t countVertexCells(unsigned int* table, size_t table_size, const unsigned int* vertex_ids, size_t vertex_count)
  822. {
  823. IdHasher hasher;
  824. memset(table, -1, table_size * sizeof(unsigned int));
  825. size_t result = 0;
  826. for (size_t i = 0; i < vertex_count; ++i)
  827. {
  828. unsigned int id = vertex_ids[i];
  829. unsigned int* entry = hashLookup2(table, table_size, hasher, id, ~0u);
  830. result += (*entry == ~0u);
  831. *entry = id;
  832. }
  833. return result;
  834. }
  835. static void fillCellQuadrics(Quadric* cell_quadrics, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* vertex_cells)
  836. {
  837. for (size_t i = 0; i < index_count; i += 3)
  838. {
  839. unsigned int i0 = indices[i + 0];
  840. unsigned int i1 = indices[i + 1];
  841. unsigned int i2 = indices[i + 2];
  842. unsigned int c0 = vertex_cells[i0];
  843. unsigned int c1 = vertex_cells[i1];
  844. unsigned int c2 = vertex_cells[i2];
  845. bool single_cell = (c0 == c1) & (c0 == c2);
  846. Quadric Q;
  847. quadricFromTriangle(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], single_cell ? 3.f : 1.f);
  848. if (single_cell)
  849. {
  850. quadricAdd(cell_quadrics[c0], Q);
  851. }
  852. else
  853. {
  854. quadricAdd(cell_quadrics[c0], Q);
  855. quadricAdd(cell_quadrics[c1], Q);
  856. quadricAdd(cell_quadrics[c2], Q);
  857. }
  858. }
  859. }
  860. static void fillCellQuadrics(Quadric* cell_quadrics, const Vector3* vertex_positions, size_t vertex_count, const unsigned int* vertex_cells)
  861. {
  862. for (size_t i = 0; i < vertex_count; ++i)
  863. {
  864. unsigned int c = vertex_cells[i];
  865. const Vector3& v = vertex_positions[i];
  866. Quadric Q;
  867. quadricFromPoint(Q, v.x, v.y, v.z, 1.f);
  868. quadricAdd(cell_quadrics[c], Q);
  869. }
  870. }
  871. static void fillCellRemap(unsigned int* cell_remap, float* cell_errors, size_t cell_count, const unsigned int* vertex_cells, const Quadric* cell_quadrics, const Vector3* vertex_positions, size_t vertex_count)
  872. {
  873. memset(cell_remap, -1, cell_count * sizeof(unsigned int));
  874. for (size_t i = 0; i < vertex_count; ++i)
  875. {
  876. unsigned int cell = vertex_cells[i];
  877. float error = quadricError(cell_quadrics[cell], vertex_positions[i]);
  878. if (cell_remap[cell] == ~0u || cell_errors[cell] > error)
  879. {
  880. cell_remap[cell] = unsigned(i);
  881. cell_errors[cell] = error;
  882. }
  883. }
  884. }
  885. static size_t filterTriangles(unsigned int* destination, unsigned int* tritable, size_t tritable_size, const unsigned int* indices, size_t index_count, const unsigned int* vertex_cells, const unsigned int* cell_remap)
  886. {
  887. TriangleHasher hasher = {destination};
  888. memset(tritable, -1, tritable_size * sizeof(unsigned int));
  889. size_t result = 0;
  890. for (size_t i = 0; i < index_count; i += 3)
  891. {
  892. unsigned int c0 = vertex_cells[indices[i + 0]];
  893. unsigned int c1 = vertex_cells[indices[i + 1]];
  894. unsigned int c2 = vertex_cells[indices[i + 2]];
  895. if (c0 != c1 && c0 != c2 && c1 != c2)
  896. {
  897. unsigned int a = cell_remap[c0];
  898. unsigned int b = cell_remap[c1];
  899. unsigned int c = cell_remap[c2];
  900. if (b < a && b < c)
  901. {
  902. unsigned int t = a;
  903. a = b, b = c, c = t;
  904. }
  905. else if (c < a && c < b)
  906. {
  907. unsigned int t = c;
  908. c = b, b = a, a = t;
  909. }
  910. destination[result * 3 + 0] = a;
  911. destination[result * 3 + 1] = b;
  912. destination[result * 3 + 2] = c;
  913. unsigned int* entry = hashLookup2(tritable, tritable_size, hasher, unsigned(result), ~0u);
  914. if (*entry == ~0u)
  915. *entry = unsigned(result++);
  916. }
  917. }
  918. return result * 3;
  919. }
  920. static float interpolate(float y, float x0, float y0, float x1, float y1, float x2, float y2)
  921. {
  922. // three point interpolation from "revenge of interpolation search" paper
  923. float num = (y1 - y) * (x1 - x2) * (x1 - x0) * (y2 - y0);
  924. float den = (y2 - y) * (x1 - x2) * (y0 - y1) + (y0 - y) * (x1 - x0) * (y1 - y2);
  925. return x1 + num / den;
  926. }
  927. } // namespace meshopt
  928. #if TRACE
  929. unsigned char* meshopt_simplifyDebugKind = 0;
  930. unsigned int* meshopt_simplifyDebugLoop = 0;
  931. #endif
// Simplifies the mesh in `indices` toward target_index_count indices using
// iterative edge collapse driven by quadric error metrics, writing the result
// to `destination` (which may alias `indices`). vertex_positions_data/stride
// describe float3 positions (stride in bytes, <= 256). target_error is a
// linear error bound (squared internally to match quadricError units).
// Returns the resulting index count.
size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride > 0 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(target_index_count <= index_count);

	meshopt_Allocator allocator;

	unsigned int* result = destination;

	// build adjacency information
	EdgeAdjacency adjacency = {};
	buildEdgeAdjacency(adjacency, indices, index_count, vertex_count, allocator);

	// build position remap that maps each vertex to the one with identical position
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	unsigned int* wedge = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, wedge, vertex_positions_data, vertex_count, vertex_positions_stride, allocator);

	// classify vertices; vertex kind determines collapse rules, see kCanCollapse
	unsigned char* vertex_kind = allocator.allocate<unsigned char>(vertex_count);
	unsigned int* loop = allocator.allocate<unsigned int>(vertex_count);
	classifyVertices(vertex_kind, loop, vertex_count, adjacency, remap, wedge);

#if TRACE
	size_t unique_positions = 0;
	for (size_t i = 0; i < vertex_count; ++i)
		unique_positions += remap[i] == i;

	printf("position remap: %d vertices => %d positions\n", int(vertex_count), int(unique_positions));

	size_t kinds[Kind_Count] = {};
	for (size_t i = 0; i < vertex_count; ++i)
		kinds[vertex_kind[i]] += remap[i] == i;

	printf("kinds: manifold %d, border %d, seam %d, complex %d, locked %d\n",
	    int(kinds[Kind_Manifold]), int(kinds[Kind_Border]), int(kinds[Kind_Seam]), int(kinds[Kind_Complex]), int(kinds[Kind_Locked]));
#endif

	// rescale positions so quadric errors are expressed in a normalized space
	// (presumably a unit cube; see rescalePositions - TODO confirm)
	Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
	rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride);

	// seed per-position quadrics from triangle planes plus border/seam edge constraints
	Quadric* vertex_quadrics = allocator.allocate<Quadric>(vertex_count);
	memset(vertex_quadrics, 0, vertex_count * sizeof(Quadric));

	fillFaceQuadrics(vertex_quadrics, indices, index_count, vertex_positions, remap);
	fillEdgeQuadrics(vertex_quadrics, indices, index_count, vertex_positions, remap, vertex_kind, loop);

	if (result != indices)
		memcpy(result, indices, index_count * sizeof(unsigned int));

#if TRACE
	size_t pass_count = 0;
	float worst_error = 0;
#endif

	// scratch buffers reused by every collapse pass
	Collapse* edge_collapses = allocator.allocate<Collapse>(index_count);
	unsigned int* collapse_order = allocator.allocate<unsigned int>(index_count);
	unsigned int* collapse_remap = allocator.allocate<unsigned int>(vertex_count);
	unsigned char* collapse_locked = allocator.allocate<unsigned char>(vertex_count);

	size_t result_count = index_count;

	// target_error input is linear; we need to adjust it to match quadricError units
	float error_limit = target_error * target_error;

	while (result_count > target_index_count)
	{
		size_t edge_collapse_count = pickEdgeCollapses(edge_collapses, result, result_count, remap, vertex_kind, loop);

		// no edges can be collapsed any more due to topology restrictions
		if (edge_collapse_count == 0)
			break;

		rankEdgeCollapses(edge_collapses, edge_collapse_count, vertex_positions, vertex_quadrics, remap);

#if TRACE > 1
		dumpEdgeCollapses(edge_collapses, edge_collapse_count, vertex_kind);
#endif

		sortEdgeCollapses(collapse_order, edge_collapses, edge_collapse_count);

		// most collapses remove 2 triangles; use this to establish a bound on the pass in terms of error limit
		// note that edge_collapse_goal is an estimate; triangle_collapse_goal will be used to actually limit collapses
		size_t triangle_collapse_goal = (result_count - target_index_count) / 3;
		size_t edge_collapse_goal = triangle_collapse_goal / 2;

		// we limit the error in each pass based on the error of optimal last collapse; since many collapses will be locked
		// as they will share vertices with other successful collapses, we need to increase the acceptable error by this factor
		const float kPassErrorBound = 1.5f;

		float error_goal = edge_collapse_goal < edge_collapse_count ? edge_collapses[collapse_order[edge_collapse_goal]].error * kPassErrorBound : FLT_MAX;

		// reset per-pass tracking: identity remap, no vertex locked
		for (size_t i = 0; i < vertex_count; ++i)
			collapse_remap[i] = unsigned(i);

		memset(collapse_locked, 0, vertex_count);

		size_t collapses = performEdgeCollapses(collapse_remap, collapse_locked, vertex_quadrics, edge_collapses, edge_collapse_count, collapse_order, remap, wedge, vertex_kind, triangle_collapse_goal, error_goal, error_limit);

		// no edges can be collapsed any more due to hitting the error limit or triangle collapse limit
		if (collapses == 0)
			break;

		remapEdgeLoops(loop, vertex_count, collapse_remap);

		size_t new_count = remapIndexBuffer(result, result_count, collapse_remap);
		assert(new_count < result_count);

#if TRACE
		float pass_error = 0.f;
		for (size_t i = 0; i < edge_collapse_count; ++i)
		{
			Collapse& c = edge_collapses[collapse_order[i]];

			if (collapse_remap[c.v0] == c.v1)
				pass_error = c.error;
		}

		pass_count++;
		worst_error = (worst_error < pass_error) ? pass_error : worst_error;

		printf("pass %d: triangles: %d -> %d, collapses: %d/%d (goal: %d), error: %e (limit %e goal %e)\n", int(pass_count), int(result_count / 3), int(new_count / 3), int(collapses), int(edge_collapse_count), int(edge_collapse_goal), pass_error, error_limit, error_goal);
#endif

		result_count = new_count;
	}

#if TRACE
	printf("passes: %d, worst error: %e\n", int(pass_count), worst_error);
#endif

#if TRACE > 1
	dumpLockedCollapses(result, result_count, vertex_kind);
#endif

#if TRACE
	if (meshopt_simplifyDebugKind)
		memcpy(meshopt_simplifyDebugKind, vertex_kind, vertex_count);

	if (meshopt_simplifyDebugLoop)
		memcpy(meshopt_simplifyDebugLoop, loop, vertex_count * sizeof(unsigned int));
#endif

	return result_count;
}
// Simplifies the mesh by clustering vertices on a uniform grid ("sloppy":
// topology is not preserved). Searches for the grid resolution whose surviving
// triangle count best approaches target_index_count/3 from below, then maps
// every cell to its minimum-error vertex and filters duplicate triangles.
// Returns the resulting index count (<= target_index_count), or 0 when the
// target is too small to produce any output.
size_t meshopt_simplifySloppy(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride > 0 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(target_index_count <= index_count);

	// we expect to get ~2 triangles/vertex in the output
	size_t target_cell_count = target_index_count / 6;

	if (target_cell_count == 0)
		return 0;

	meshopt_Allocator allocator;

	Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
	rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride);

	// find the optimal grid size using guided binary search
#if TRACE
	printf("source: %d vertices, %d triangles\n", int(vertex_count), int(index_count / 3));
	printf("target: %d cells, %d triangles\n", int(target_cell_count), int(target_index_count / 3));
#endif

	unsigned int* vertex_ids = allocator.allocate<unsigned int>(vertex_count);

	const int kInterpolationPasses = 5;

	// invariant: # of triangles in min_grid <= target_count
	int min_grid = 0;
	int max_grid = 1025;
	size_t min_triangles = 0;
	size_t max_triangles = index_count / 3;

	// instead of starting in the middle, let's guess as to what the answer might be! triangle count usually grows as a square of grid size...
	int next_grid_size = int(sqrtf(float(target_cell_count)) + 0.5f);

	for (int pass = 0; pass < 10 + kInterpolationPasses; ++pass)
	{
		assert(min_triangles < target_index_count / 3);
		assert(max_grid - min_grid > 1);

		// we clamp the prediction of the grid size to make sure that the search converges
		int grid_size = next_grid_size;
		grid_size = (grid_size <= min_grid) ? min_grid + 1 : (grid_size >= max_grid) ? max_grid - 1 : grid_size;

		computeVertexIds(vertex_ids, vertex_positions, vertex_count, grid_size);
		size_t triangles = countTriangles(vertex_ids, indices, index_count);

#if TRACE
		printf("pass %d (%s): grid size %d, triangles %d, %s\n",
		    pass, (pass == 0) ? "guess" : (pass <= kInterpolationPasses) ? "lerp" : "binary",
		    grid_size, int(triangles),
		    (triangles <= target_index_count / 3) ? "under" : "over");
#endif

		float tip = interpolate(float(target_index_count / 3), float(min_grid), float(min_triangles), float(grid_size), float(triangles), float(max_grid), float(max_triangles));

		// tighten the bracket [min_grid, max_grid] around the target triangle count
		if (triangles <= target_index_count / 3)
		{
			min_grid = grid_size;
			min_triangles = triangles;
		}
		else
		{
			max_grid = grid_size;
			max_triangles = triangles;
		}

		if (triangles == target_index_count / 3 || max_grid - min_grid <= 1)
			break;

		// we start by using interpolation search - it usually converges faster
		// however, interpolation search has a worst case of O(N) so we switch to binary search after a few iterations which converges in O(logN)
		next_grid_size = (pass < kInterpolationPasses) ? int(tip + 0.5f) : (min_grid + max_grid) / 2;
	}

	if (min_triangles == 0)
		return 0;

	// build vertex->cell association by mapping all vertices with the same quantized position to the same cell
	size_t table_size = hashBuckets2(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);

	unsigned int* vertex_cells = allocator.allocate<unsigned int>(vertex_count);

	computeVertexIds(vertex_ids, vertex_positions, vertex_count, min_grid);
	size_t cell_count = fillVertexCells(table, table_size, vertex_cells, vertex_ids, vertex_count);

	// build a quadric for each target cell
	Quadric* cell_quadrics = allocator.allocate<Quadric>(cell_count);
	memset(cell_quadrics, 0, cell_count * sizeof(Quadric));

	fillCellQuadrics(cell_quadrics, indices, index_count, vertex_positions, vertex_cells);

	// for each target cell, find the vertex with the minimal error
	unsigned int* cell_remap = allocator.allocate<unsigned int>(cell_count);
	float* cell_errors = allocator.allocate<float>(cell_count);

	fillCellRemap(cell_remap, cell_errors, cell_count, vertex_cells, cell_quadrics, vertex_positions, vertex_count);

	// collapse triangles!
	// note that we need to filter out triangles that we've already output because we very frequently generate redundant triangles between cells :(
	size_t tritable_size = hashBuckets2(min_triangles);
	unsigned int* tritable = allocator.allocate<unsigned int>(tritable_size);

	size_t write = filterTriangles(destination, tritable, tritable_size, indices, index_count, vertex_cells, cell_remap);
	assert(write <= target_index_count);

#if TRACE
	printf("result: %d cells, %d triangles (%d unfiltered)\n", int(cell_count), int(write / 3), int(min_triangles));
#endif

	return write;
}
// Simplifies a point cloud down to (at most) target_vertex_count points by
// clustering vertices on a uniform grid. Searches for the grid resolution
// whose distinct cell count best approaches the target from below, then writes
// the minimum-error representative vertex index of each cell to `destination`.
// Returns the number of indices written (<= target_vertex_count), or 0 when
// the target is 0 or no resolution produces output.
size_t meshopt_simplifyPoints(unsigned int* destination, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, size_t target_vertex_count)
{
	using namespace meshopt;

	assert(vertex_positions_stride > 0 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(target_vertex_count <= vertex_count);

	size_t target_cell_count = target_vertex_count;

	if (target_cell_count == 0)
		return 0;

	meshopt_Allocator allocator;

	Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
	rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride);

	// find the optimal grid size using guided binary search
#if TRACE
	printf("source: %d vertices\n", int(vertex_count));
	printf("target: %d cells\n", int(target_cell_count));
#endif

	unsigned int* vertex_ids = allocator.allocate<unsigned int>(vertex_count);

	size_t table_size = hashBuckets2(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);

	const int kInterpolationPasses = 5;

	// invariant: # of vertices in min_grid <= target_count
	int min_grid = 0;
	int max_grid = 1025;
	size_t min_vertices = 0;
	size_t max_vertices = vertex_count;

	// instead of starting in the middle, let's guess as to what the answer might be! triangle count usually grows as a square of grid size...
	int next_grid_size = int(sqrtf(float(target_cell_count)) + 0.5f);

	for (int pass = 0; pass < 10 + kInterpolationPasses; ++pass)
	{
		assert(min_vertices < target_vertex_count);
		assert(max_grid - min_grid > 1);

		// we clamp the prediction of the grid size to make sure that the search converges
		int grid_size = next_grid_size;
		grid_size = (grid_size <= min_grid) ? min_grid + 1 : (grid_size >= max_grid) ? max_grid - 1 : grid_size;

		computeVertexIds(vertex_ids, vertex_positions, vertex_count, grid_size);
		size_t vertices = countVertexCells(table, table_size, vertex_ids, vertex_count);

#if TRACE
		printf("pass %d (%s): grid size %d, vertices %d, %s\n",
		    pass, (pass == 0) ? "guess" : (pass <= kInterpolationPasses) ? "lerp" : "binary",
		    grid_size, int(vertices),
		    (vertices <= target_vertex_count) ? "under" : "over");
#endif

		float tip = interpolate(float(target_vertex_count), float(min_grid), float(min_vertices), float(grid_size), float(vertices), float(max_grid), float(max_vertices));

		// tighten the bracket [min_grid, max_grid] around the target vertex count
		if (vertices <= target_vertex_count)
		{
			min_grid = grid_size;
			min_vertices = vertices;
		}
		else
		{
			max_grid = grid_size;
			max_vertices = vertices;
		}

		if (vertices == target_vertex_count || max_grid - min_grid <= 1)
			break;

		// we start by using interpolation search - it usually converges faster
		// however, interpolation search has a worst case of O(N) so we switch to binary search after a few iterations which converges in O(logN)
		next_grid_size = (pass < kInterpolationPasses) ? int(tip + 0.5f) : (min_grid + max_grid) / 2;
	}

	if (min_vertices == 0)
		return 0;

	// build vertex->cell association by mapping all vertices with the same quantized position to the same cell
	unsigned int* vertex_cells = allocator.allocate<unsigned int>(vertex_count);

	computeVertexIds(vertex_ids, vertex_positions, vertex_count, min_grid);
	size_t cell_count = fillVertexCells(table, table_size, vertex_cells, vertex_ids, vertex_count);

	// build a quadric for each target cell
	Quadric* cell_quadrics = allocator.allocate<Quadric>(cell_count);
	memset(cell_quadrics, 0, cell_count * sizeof(Quadric));

	fillCellQuadrics(cell_quadrics, vertex_positions, vertex_count, vertex_cells);

	// for each target cell, find the vertex with the minimal error
	unsigned int* cell_remap = allocator.allocate<unsigned int>(cell_count);
	float* cell_errors = allocator.allocate<float>(cell_count);

	fillCellRemap(cell_remap, cell_errors, cell_count, vertex_cells, cell_quadrics, vertex_positions, vertex_count);

	// copy results to the output
	assert(cell_count <= target_vertex_count);
	memcpy(destination, cell_remap, sizeof(unsigned int) * cell_count);

#if TRACE
	printf("result: %d cells\n", int(cell_count));
#endif

	return cell_count;
}