simplifier.cpp 101 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944
  1. // This file is part of meshoptimizer library; see meshoptimizer.h for version/license details
  2. #include "meshoptimizer.h"
  3. #include <assert.h>
  4. #include <float.h>
  5. #include <math.h>
  6. #include <string.h>
  7. #ifndef TRACE
  8. #define TRACE 0
  9. #endif
  10. #if TRACE
  11. #include <stdio.h>
  12. #endif
  13. #if TRACE
  14. #define TRACESTATS(i) stats[i]++;
  15. #else
  16. #define TRACESTATS(i) (void)0
  17. #endif
  18. // This work is based on:
  19. // Michael Garland and Paul S. Heckbert. Surface simplification using quadric error metrics. 1997
  20. // Michael Garland. Quadric-based polygonal surface simplification. 1999
  21. // Peter Lindstrom. Out-of-Core Simplification of Large Polygonal Models. 2000
  22. // Matthias Teschner, Bruno Heidelberger, Matthias Mueller, Danat Pomeranets, Markus Gross. Optimized Spatial Hashing for Collision Detection of Deformable Objects. 2003
  23. // Peter Van Sandt, Yannis Chronis, Jignesh M. Patel. Efficiently Searching In-Memory Sorted Arrays: Revenge of the Interpolation Search? 2019
  24. // Hugues Hoppe. New Quadric Metric for Simplifying Meshes with Appearance Attributes. 1999
  25. // Hugues Hoppe, Steve Marschner. Efficient Minimization of New Quadric Metric for Simplifying Meshes with Appearance Attributes. 2000
  26. namespace meshopt
  27. {
// Directed half-edge adjacency: for every vertex, the set of outgoing edges of the
// triangles that use it. Filled by updateEdgeAdjacency.
struct EdgeAdjacency
{
	struct Edge
	{
		unsigned int next; // vertex this half-edge points to
		unsigned int prev; // third vertex of the owning triangle (previous around the face)
	};

	unsigned int* offsets; // vertex_count+1 entries; edges of vertex v live in data[offsets[v]..offsets[v+1])
	Edge* data;            // index_count edge records total (one per index)
};
// Allocates adjacency storage (vertex_count+1 offsets, index_count edges);
// contents are uninitialized until updateEdgeAdjacency is called.
static void prepareEdgeAdjacency(EdgeAdjacency& adjacency, size_t index_count, size_t vertex_count, meshopt_Allocator& allocator)
{
	adjacency.offsets = allocator.allocate<unsigned int>(vertex_count + 1);
	adjacency.data = allocator.allocate<EdgeAdjacency::Edge>(index_count);
}
  43. static void updateEdgeAdjacency(EdgeAdjacency& adjacency, const unsigned int* indices, size_t index_count, size_t vertex_count, const unsigned int* remap)
  44. {
  45. size_t face_count = index_count / 3;
  46. unsigned int* offsets = adjacency.offsets + 1;
  47. EdgeAdjacency::Edge* data = adjacency.data;
  48. // fill edge counts
  49. memset(offsets, 0, vertex_count * sizeof(unsigned int));
  50. for (size_t i = 0; i < index_count; ++i)
  51. {
  52. unsigned int v = remap ? remap[indices[i]] : indices[i];
  53. assert(v < vertex_count);
  54. offsets[v]++;
  55. }
  56. // fill offset table
  57. unsigned int offset = 0;
  58. for (size_t i = 0; i < vertex_count; ++i)
  59. {
  60. unsigned int count = offsets[i];
  61. offsets[i] = offset;
  62. offset += count;
  63. }
  64. assert(offset == index_count);
  65. // fill edge data
  66. for (size_t i = 0; i < face_count; ++i)
  67. {
  68. unsigned int a = indices[i * 3 + 0], b = indices[i * 3 + 1], c = indices[i * 3 + 2];
  69. if (remap)
  70. {
  71. a = remap[a];
  72. b = remap[b];
  73. c = remap[c];
  74. }
  75. data[offsets[a]].next = b;
  76. data[offsets[a]].prev = c;
  77. offsets[a]++;
  78. data[offsets[b]].next = c;
  79. data[offsets[b]].prev = a;
  80. offsets[b]++;
  81. data[offsets[c]].next = a;
  82. data[offsets[c]].prev = b;
  83. offsets[c]++;
  84. }
  85. // finalize offsets
  86. adjacency.offsets[0] = 0;
  87. assert(adjacency.offsets[vertex_count] == index_count);
  88. }
  89. struct PositionHasher
  90. {
  91. const float* vertex_positions;
  92. size_t vertex_stride_float;
  93. const unsigned int* sparse_remap;
  94. size_t hash(unsigned int index) const
  95. {
  96. unsigned int ri = sparse_remap ? sparse_remap[index] : index;
  97. const unsigned int* key = reinterpret_cast<const unsigned int*>(vertex_positions + ri * vertex_stride_float);
  98. unsigned int x = key[0], y = key[1], z = key[2];
  99. // replace negative zero with zero
  100. x = (x == 0x80000000) ? 0 : x;
  101. y = (y == 0x80000000) ? 0 : y;
  102. z = (z == 0x80000000) ? 0 : z;
  103. // scramble bits to make sure that integer coordinates have entropy in lower bits
  104. x ^= x >> 17;
  105. y ^= y >> 17;
  106. z ^= z >> 17;
  107. // Optimized Spatial Hashing for Collision Detection of Deformable Objects
  108. return (x * 73856093) ^ (y * 19349663) ^ (z * 83492791);
  109. }
  110. bool equal(unsigned int lhs, unsigned int rhs) const
  111. {
  112. unsigned int li = sparse_remap ? sparse_remap[lhs] : lhs;
  113. unsigned int ri = sparse_remap ? sparse_remap[rhs] : rhs;
  114. const float* lv = vertex_positions + li * vertex_stride_float;
  115. const float* rv = vertex_positions + ri * vertex_stride_float;
  116. return lv[0] == rv[0] && lv[1] == rv[1] && lv[2] == rv[2];
  117. }
  118. };
  119. struct RemapHasher
  120. {
  121. unsigned int* remap;
  122. size_t hash(unsigned int id) const
  123. {
  124. return id * 0x5bd1e995;
  125. }
  126. bool equal(unsigned int lhs, unsigned int rhs) const
  127. {
  128. return remap[lhs] == rhs;
  129. }
  130. };
  131. static size_t hashBuckets2(size_t count)
  132. {
  133. size_t buckets = 1;
  134. while (buckets < count + count / 4)
  135. buckets *= 2;
  136. return buckets;
  137. }
  138. template <typename T, typename Hash>
  139. static T* hashLookup2(T* table, size_t buckets, const Hash& hash, const T& key, const T& empty)
  140. {
  141. assert(buckets > 0);
  142. assert((buckets & (buckets - 1)) == 0);
  143. size_t hashmod = buckets - 1;
  144. size_t bucket = hash.hash(key) & hashmod;
  145. for (size_t probe = 0; probe <= hashmod; ++probe)
  146. {
  147. T& item = table[bucket];
  148. if (item == empty)
  149. return &item;
  150. if (hash.equal(item, key))
  151. return &item;
  152. // hash collision, quadratic probing
  153. bucket = (bucket + probe + 1) & hashmod;
  154. }
  155. assert(false && "Hash table is full"); // unreachable
  156. return NULL;
  157. }
// Builds remap[] (each vertex -> canonical vertex with an identical position) and, when
// wedge is non-NULL, wedge[] — a cyclic linked list per position of all vertices sharing
// it; for vertices with a unique position, wedge[i] == remap[i] == i.
static void buildPositionRemap(unsigned int* remap, unsigned int* wedge, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, const unsigned int* sparse_remap, meshopt_Allocator& allocator)
{
	PositionHasher hasher = {vertex_positions_data, vertex_positions_stride / sizeof(float), sparse_remap};

	size_t table_size = hashBuckets2(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int)); // ~0u marks empty slots

	// build forward remap: for each vertex, which other (canonical) vertex does it map to?
	// we use position equivalence for this, and remap vertices to other existing vertices
	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int index = unsigned(i);

		unsigned int* entry = hashLookup2(table, table_size, hasher, index, ~0u);

		if (*entry == ~0u)
			*entry = index; // first vertex at this position becomes the canonical one

		remap[index] = *entry;
	}

	// table was the last allocation above, so it can be returned to the allocator here
	allocator.deallocate(table);

	if (!wedge)
		return;

	// build wedge table: for each vertex, which other vertex is the next wedge that also maps to the same vertex?
	// entries in table form a (cyclic) wedge loop per vertex; for manifold vertices, wedge[i] == remap[i] == i
	for (size_t i = 0; i < vertex_count; ++i)
		wedge[i] = unsigned(i);

	for (size_t i = 0; i < vertex_count; ++i)
		if (remap[i] != i)
		{
			unsigned int r = remap[i];

			// splice i into the cyclic list rooted at its canonical vertex r
			wedge[i] = wedge[r];
			wedge[r] = unsigned(i);
		}
}
// Computes the subset of vertices referenced by indices, rewrites indices in-place to a
// dense numbering (in first-use order), and returns remap (dense index -> original vertex).
// *out_vertex_count receives the number of unique vertices.
static unsigned int* buildSparseRemap(unsigned int* indices, size_t index_count, size_t vertex_count, size_t* out_vertex_count, meshopt_Allocator& allocator)
{
	// use a bit set to compute the precise number of unique vertices
	unsigned char* filter = allocator.allocate<unsigned char>((vertex_count + 7) / 8);

	// instead of a full memset we only clear the bytes the counting pass below will read,
	// which keeps this proportional to index_count rather than vertex_count
	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];
		assert(index < vertex_count);

		filter[index / 8] = 0;
	}

	size_t unique = 0;
	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];

		// count the bit transition 0 -> 1, so each vertex is counted exactly once
		unique += (filter[index / 8] & (1 << (index % 8))) == 0;
		filter[index / 8] |= 1 << (index % 8);
	}

	unsigned int* remap = allocator.allocate<unsigned int>(unique);
	size_t offset = 0;

	// temporary map dense => sparse; we allocate it last so that we can deallocate it
	size_t revremap_size = hashBuckets2(unique);
	unsigned int* revremap = allocator.allocate<unsigned int>(revremap_size);
	memset(revremap, -1, revremap_size * sizeof(unsigned int)); // ~0u marks empty slots

	// fill remap, using revremap as a helper, and rewrite indices in the same pass
	RemapHasher hasher = {remap};

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];

		unsigned int* entry = hashLookup2(revremap, revremap_size, hasher, index, ~0u);

		if (*entry == ~0u)
		{
			// first occurrence of this vertex: assign the next dense id
			remap[offset] = index;
			*entry = unsigned(offset);
			offset++;
		}

		indices[i] = *entry;
	}

	allocator.deallocate(revremap);

	assert(offset == unique);
	*out_vertex_count = unique;

	return remap;
}
// Per-vertex topological classification; drives which edge collapses are permitted
// (see kCanCollapse/kHasOpposite below).
enum VertexKind
{
	Kind_Manifold, // not on an attribute seam, not on any boundary
	Kind_Border,   // not on an attribute seam, has exactly two open edges
	Kind_Seam,     // on an attribute seam with exactly two attribute seam edges
	Kind_Complex,  // none of the above; these vertices can move as long as all wedges move to the target vertex
	Kind_Locked,   // none of the above; these vertices can't move

	Kind_Count
};
// manifold vertices can collapse onto anything
// border/seam vertices can collapse onto border/seam respectively, or locked
// complex vertices can collapse onto complex/locked
// a rule of thumb is that collapsing kind A into kind B preserves the kind B in the target vertex
// for example, while we could collapse Complex into Manifold, this would mean the target vertex isn't Manifold anymore
// rows are the source kind, columns the target kind, both in VertexKind declaration order
const unsigned char kCanCollapse[Kind_Count][Kind_Count] = {
    {1, 1, 1, 1, 1},
    {0, 1, 0, 0, 1},
    {0, 0, 1, 0, 1},
    {0, 0, 0, 1, 1},
    {0, 0, 0, 0, 0},
};
// if a vertex is manifold or seam, adjoining edges are guaranteed to have an opposite edge
// note that for seam edges, the opposite edge isn't present in the attribute-based topology
// but is present if you consider a position-only mesh variant
// while many complex collapses have the opposite edge, since complex vertices collapse to the
// same wedge, keeping opposite edges separate improves the quality by considering both targets
// rows/columns are the kinds of the two edge endpoints, in VertexKind declaration order
const unsigned char kHasOpposite[Kind_Count][Kind_Count] = {
    {1, 1, 1, 1, 1},
    {1, 0, 1, 0, 0},
    {1, 1, 1, 0, 1},
    {1, 0, 0, 0, 0},
    {1, 0, 1, 0, 0},
};
  264. static bool hasEdge(const EdgeAdjacency& adjacency, unsigned int a, unsigned int b)
  265. {
  266. unsigned int count = adjacency.offsets[a + 1] - adjacency.offsets[a];
  267. const EdgeAdjacency::Edge* edges = adjacency.data + adjacency.offsets[a];
  268. for (size_t i = 0; i < count; ++i)
  269. if (edges[i].next == b)
  270. return true;
  271. return false;
  272. }
  273. static bool hasEdge(const EdgeAdjacency& adjacency, unsigned int a, unsigned int b, const unsigned int* remap, const unsigned int* wedge)
  274. {
  275. unsigned int v = a;
  276. do
  277. {
  278. unsigned int count = adjacency.offsets[v + 1] - adjacency.offsets[v];
  279. const EdgeAdjacency::Edge* edges = adjacency.data + adjacency.offsets[v];
  280. for (size_t i = 0; i < count; ++i)
  281. if (remap[edges[i].next] == remap[b])
  282. return true;
  283. v = wedge[v];
  284. } while (v != a);
  285. return false;
  286. }
// Classifies every vertex into a VertexKind, and fills loop/loopback with the open-edge
// neighbors used for border/seam collapses: loop[v] is the target of v's open outgoing
// edge and loopback[v] the source of its open incoming edge (~0u if none, v itself if
// there is more than one, which disqualifies the vertex from border/seam status).
static void classifyVertices(unsigned char* result, unsigned int* loop, unsigned int* loopback, size_t vertex_count, const EdgeAdjacency& adjacency, const unsigned int* remap, const unsigned int* wedge, const unsigned char* vertex_lock, const unsigned int* sparse_remap, unsigned int options)
{
	memset(loop, -1, vertex_count * sizeof(unsigned int));
	memset(loopback, -1, vertex_count * sizeof(unsigned int));

	// incoming & outgoing open edges: ~0u if no open edges, i if there are more than 1
	// note that this is the same data as required in loop[] arrays; loop[] data is only used for border/seam by default
	// in permissive mode we also use it to guide complex-complex collapses, so we fill it for all vertices
	unsigned int* openinc = loopback;
	unsigned int* openout = loop;

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int vertex = unsigned(i);

		unsigned int count = adjacency.offsets[vertex + 1] - adjacency.offsets[vertex];
		const EdgeAdjacency::Edge* edges = adjacency.data + adjacency.offsets[vertex];

		for (size_t j = 0; j < count; ++j)
		{
			unsigned int target = edges[j].next;

			if (target == vertex)
			{
				// degenerate triangles have two distinct edges instead of three, and the self edge
				// is bi-directional by definition; this can break border/seam classification by "closing"
				// the open edge from another triangle and falsely marking the vertex as manifold
				// instead we mark the vertex as having >1 open edges which turns it into locked/complex
				openinc[vertex] = openout[vertex] = vertex;
			}
			else if (!hasEdge(adjacency, target, vertex))
			{
				// vertex->target has no opposite edge, so it is open; record it unless
				// one was already recorded, in which case mark the endpoint ambiguous
				openinc[target] = (openinc[target] == ~0u) ? vertex : target;
				openout[vertex] = (openout[vertex] == ~0u) ? target : vertex;
			}
		}
	}

#if TRACE
	size_t stats[4] = {};
#endif

	for (size_t i = 0; i < vertex_count; ++i)
	{
		if (remap[i] == i)
		{
			if (wedge[i] == i)
			{
				// no attribute seam, need to check if it's manifold
				unsigned int openi = openinc[i], openo = openout[i];

				// note: we classify any vertices with no open edges as manifold
				// this is technically incorrect - if 4 triangles share an edge, we'll classify vertices as manifold
				// it's unclear if this is a problem in practice
				if (openi == ~0u && openo == ~0u)
				{
					result[i] = Kind_Manifold;
				}
				else if (openi != ~0u && openo != ~0u && remap[openi] == remap[openo] && openi != i)
				{
					// classify half-seams as seams (the branch below would mis-classify them as borders)
					// half-seam is a single vertex that connects to both vertices of a potential seam
					// treating these as seams allows collapsing the "full" seam vertex onto them
					result[i] = Kind_Seam;
				}
				else if (openi != i && openo != i)
				{
					result[i] = Kind_Border;
				}
				else
				{
					result[i] = Kind_Locked;
					TRACESTATS(0);
				}
			}
			else if (wedge[wedge[i]] == i)
			{
				// attribute seam; need to distinguish between Seam and Locked
				unsigned int w = wedge[i];
				unsigned int openiv = openinc[i], openov = openout[i];
				unsigned int openiw = openinc[w], openow = openout[w];

				// seam should have one open half-edge for each vertex, and the edges need to "connect" - point to the same vertex post-remap
				if (openiv != ~0u && openiv != i && openov != ~0u && openov != i &&
				    openiw != ~0u && openiw != w && openow != ~0u && openow != w)
				{
					if (remap[openiv] == remap[openow] && remap[openov] == remap[openiw] && remap[openiv] != remap[openov])
					{
						result[i] = Kind_Seam;
					}
					else
					{
						result[i] = Kind_Locked;
						TRACESTATS(1);
					}
				}
				else
				{
					result[i] = Kind_Locked;
					TRACESTATS(2);
				}
			}
			else
			{
				// more than one vertex maps to this one; we don't have classification available
				result[i] = Kind_Locked;
				TRACESTATS(3);
			}
		}
		else
		{
			// non-primary wedges inherit the classification of their primary vertex
			assert(remap[i] < i);

			result[i] = result[remap[i]];
		}
	}

	if (options & meshopt_SimplifyPermissive)
		for (size_t i = 0; i < vertex_count; ++i)
			if (result[i] == Kind_Seam || result[i] == Kind_Locked)
			{
				if (remap[i] != i)
				{
					// only process primary vertices; wedges will be updated to match the primary vertex
					result[i] = result[remap[i]];
					continue;
				}

				bool protect = false;

				// vertex_lock may protect any wedge, not just the primary vertex, so we switch to complex only if no wedges are protected
				unsigned int v = unsigned(i);
				do
				{
					unsigned int rv = sparse_remap ? sparse_remap[v] : v;
					protect |= vertex_lock && (vertex_lock[rv] & meshopt_SimplifyVertex_Protect) != 0;
					v = wedge[v];
				} while (v != i);

				// protect if any adjoining edge doesn't have an opposite edge (indicating vertex is on the border)
				// note: v == i here, as the previous loop walked the entire wedge cycle
				do
				{
					const EdgeAdjacency::Edge* edges = &adjacency.data[adjacency.offsets[v]];
					size_t count = adjacency.offsets[v + 1] - adjacency.offsets[v];

					for (size_t j = 0; j < count; ++j)
						protect |= !hasEdge(adjacency, edges[j].next, v, remap, wedge);

					v = wedge[v];
				} while (v != i);

				result[i] = protect ? result[i] : int(Kind_Complex);
			}

	if (vertex_lock)
	{
		// vertex_lock may lock any wedge, not just the primary vertex, so we need to lock the primary vertex and relock any wedges
		for (size_t i = 0; i < vertex_count; ++i)
		{
			unsigned int ri = sparse_remap ? sparse_remap[i] : unsigned(i);

			if (vertex_lock[ri] & meshopt_SimplifyVertex_Lock)
				result[remap[i]] = Kind_Locked;
		}

		for (size_t i = 0; i < vertex_count; ++i)
			if (result[remap[i]] == Kind_Locked)
				result[i] = Kind_Locked;
	}

	if (options & meshopt_SimplifyLockBorder)
		for (size_t i = 0; i < vertex_count; ++i)
			if (result[i] == Kind_Border)
				result[i] = Kind_Locked;

#if TRACE
	printf("locked: many open edges %d, disconnected seam %d, many seam edges %d, many wedges %d\n",
	    int(stats[0]), int(stats[1]), int(stats[2]), int(stats[3]));
#endif
}
// Vertex position; the simplifier works in the normalized space produced by rescalePositions.
struct Vector3
{
	float x, y, z;
};
  449. static float rescalePositions(Vector3* result, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, const unsigned int* sparse_remap = NULL, float* out_offset = NULL)
  450. {
  451. size_t vertex_stride_float = vertex_positions_stride / sizeof(float);
  452. float minv[3] = {FLT_MAX, FLT_MAX, FLT_MAX};
  453. float maxv[3] = {-FLT_MAX, -FLT_MAX, -FLT_MAX};
  454. for (size_t i = 0; i < vertex_count; ++i)
  455. {
  456. unsigned int ri = sparse_remap ? sparse_remap[i] : unsigned(i);
  457. const float* v = vertex_positions_data + ri * vertex_stride_float;
  458. if (result)
  459. {
  460. result[i].x = v[0];
  461. result[i].y = v[1];
  462. result[i].z = v[2];
  463. }
  464. for (int j = 0; j < 3; ++j)
  465. {
  466. float vj = v[j];
  467. minv[j] = minv[j] > vj ? vj : minv[j];
  468. maxv[j] = maxv[j] < vj ? vj : maxv[j];
  469. }
  470. }
  471. float extent = 0.f;
  472. extent = (maxv[0] - minv[0]) < extent ? extent : (maxv[0] - minv[0]);
  473. extent = (maxv[1] - minv[1]) < extent ? extent : (maxv[1] - minv[1]);
  474. extent = (maxv[2] - minv[2]) < extent ? extent : (maxv[2] - minv[2]);
  475. if (result)
  476. {
  477. float scale = extent == 0 ? 0.f : 1.f / extent;
  478. for (size_t i = 0; i < vertex_count; ++i)
  479. {
  480. result[i].x = (result[i].x - minv[0]) * scale;
  481. result[i].y = (result[i].y - minv[1]) * scale;
  482. result[i].z = (result[i].z - minv[2]) * scale;
  483. }
  484. }
  485. if (out_offset)
  486. {
  487. out_offset[0] = minv[0];
  488. out_offset[1] = minv[1];
  489. out_offset[2] = minv[2];
  490. }
  491. return extent;
  492. }
  493. static void rescaleAttributes(float* result, const float* vertex_attributes_data, size_t vertex_count, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, const unsigned int* attribute_remap, const unsigned int* sparse_remap)
  494. {
  495. size_t vertex_attributes_stride_float = vertex_attributes_stride / sizeof(float);
  496. for (size_t i = 0; i < vertex_count; ++i)
  497. {
  498. unsigned int ri = sparse_remap ? sparse_remap[i] : unsigned(i);
  499. for (size_t k = 0; k < attribute_count; ++k)
  500. {
  501. unsigned int rk = attribute_remap[k];
  502. float a = vertex_attributes_data[ri * vertex_attributes_stride_float + rk];
  503. result[i * attribute_count + k] = a * attribute_weights[rk];
  504. }
  505. }
  506. }
// Writes simplified positions/attributes back to the caller's buffers for vertices flagged
// in vertex_update, undoing the normalization applied by rescalePositions (scale + offset)
// and rescaleAttributes (weight pre-multiplication). Externally locked vertices are skipped.
static void finalizeVertices(float* vertex_positions_data, size_t vertex_positions_stride, float* vertex_attributes_data, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, size_t vertex_count, const Vector3* vertex_positions, const float* vertex_attributes, const unsigned int* sparse_remap, const unsigned int* attribute_remap, float vertex_scale, const float* vertex_offset, const unsigned char* vertex_kind, const unsigned char* vertex_update, const unsigned char* vertex_lock)
{
	size_t vertex_positions_stride_float = vertex_positions_stride / sizeof(float);
	size_t vertex_attributes_stride_float = vertex_attributes_stride / sizeof(float);

	for (size_t i = 0; i < vertex_count; ++i)
	{
		if (!vertex_update[i])
			continue;

		unsigned int ri = sparse_remap ? sparse_remap[i] : unsigned(i);

		// updating externally locked vertices is not allowed
		if (vertex_lock && (vertex_lock[ri] & meshopt_SimplifyVertex_Lock) != 0)
			continue;

		// moving locked vertices may result in floating point drift
		if (vertex_kind[i] != Kind_Locked)
		{
			const Vector3& p = vertex_positions[i];
			float* v = vertex_positions_data + ri * vertex_positions_stride_float;

			// convert back from normalized unit-cube space to the original coordinate space
			v[0] = p.x * vertex_scale + vertex_offset[0];
			v[1] = p.y * vertex_scale + vertex_offset[1];
			v[2] = p.z * vertex_scale + vertex_offset[2];
		}

		if (attribute_count)
		{
			const float* sa = vertex_attributes + i * attribute_count;
			float* va = vertex_attributes_data + ri * vertex_attributes_stride_float;

			for (size_t k = 0; k < attribute_count; ++k)
			{
				unsigned int rk = attribute_remap[k];

				// divide out the weight applied by rescaleAttributes
				va[rk] = sa[k] / attribute_weights[rk];
			}
		}
	}
}
  540. static const size_t kMaxAttributes = 32;
// Error quadric: evaluates the quadratic form
// a00*x^2 + a11*y^2 + a22*z^2 + 2*a10*xy + 2*a20*xz + 2*a21*yz + 2*b0*x + 2*b1*y + 2*b2*z + c
// w accumulates the total weight so errors can be normalized (see quadricError)
struct Quadric
{
	float a00, a11, a22; // diagonal of the symmetric matrix A
	float a10, a20, a21; // off-diagonal of A (stored once; factor of 2 applied during evaluation)
	float b0, b1, b2, c; // linear term and constant
	float w;
};
// Weighted linear attribute gradient over position: gx*x + gy*y + gz*z + gw
// (scaled by the quadric weight; see quadricFromAttributes)
struct QuadricGrad
{
	float gx, gy, gz, gw;
};
// Accumulator for a position (x/y/z) and color (r/g/b) with total weight w.
// NOTE(review): not referenced within this part of the file; presumably used by the
// sloppy/point-cloud simplification path for weighted averaging — confirm against the rest of the file.
struct Reservoir
{
	float x, y, z;
	float r, g, b;
	float w;
};
// Candidate edge collapse v0 -> v1.
// The union field starts out as bidi (1 if the edge may collapse in either direction, set in
// pickEdgeCollapses), is overwritten with the collapse error in rankEdgeCollapses, and errorui
// aliases the error bits for the counting sort in sortEdgeCollapses.
struct Collapse
{
	unsigned int v0;
	unsigned int v1;

	union
	{
		unsigned int bidi;
		float error;
		unsigned int errorui;
	};
};
  571. static float normalize(Vector3& v)
  572. {
  573. float length = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
  574. if (length > 0)
  575. {
  576. v.x /= length;
  577. v.y /= length;
  578. v.z /= length;
  579. }
  580. return length;
  581. }
  582. static void quadricAdd(Quadric& Q, const Quadric& R)
  583. {
  584. Q.a00 += R.a00;
  585. Q.a11 += R.a11;
  586. Q.a22 += R.a22;
  587. Q.a10 += R.a10;
  588. Q.a20 += R.a20;
  589. Q.a21 += R.a21;
  590. Q.b0 += R.b0;
  591. Q.b1 += R.b1;
  592. Q.b2 += R.b2;
  593. Q.c += R.c;
  594. Q.w += R.w;
  595. }
  596. static void quadricAdd(QuadricGrad& G, const QuadricGrad& R)
  597. {
  598. G.gx += R.gx;
  599. G.gy += R.gy;
  600. G.gz += R.gz;
  601. G.gw += R.gw;
  602. }
  603. static void quadricAdd(QuadricGrad* G, const QuadricGrad* R, size_t attribute_count)
  604. {
  605. for (size_t k = 0; k < attribute_count; ++k)
  606. {
  607. G[k].gx += R[k].gx;
  608. G[k].gy += R[k].gy;
  609. G[k].gz += R[k].gz;
  610. G[k].gw += R[k].gw;
  611. }
  612. }
// Evaluates the quadric form at v: v^T*A*v + 2*b^T*v + c
// expanded manually; the off-diagonal (a10/a20/a21) and linear (b) terms carry the factor of 2
static float quadricEval(const Quadric& Q, const Vector3& v)
{
	float rx = Q.b0;
	float ry = Q.b1;
	float rz = Q.b2;

	// each off-diagonal term is folded into exactly one of rx/ry/rz before doubling
	rx += Q.a10 * v.y;
	ry += Q.a21 * v.z;
	rz += Q.a20 * v.x;

	rx *= 2;
	ry *= 2;
	rz *= 2;

	rx += Q.a00 * v.x;
	ry += Q.a11 * v.y;
	rz += Q.a22 * v.z;

	float r = Q.c;
	r += rx * v.x;
	r += ry * v.y;
	r += rz * v.z;

	return r;
}
  633. static float quadricError(const Quadric& Q, const Vector3& v)
  634. {
  635. float r = quadricEval(Q, v);
  636. float s = Q.w == 0.f ? 0.f : 1.f / Q.w;
  637. return fabsf(r) * s;
  638. }
// Evaluates combined position+attribute error at v with attribute values va.
// The quadric only stores the attribute-independent terms of (eval(pos) - attr)^2;
// the attribute-dependent parts are added here.
static float quadricError(const Quadric& Q, const QuadricGrad* G, size_t attribute_count, const Vector3& v, const float* va)
{
	float r = quadricEval(Q, v);

	// see quadricFromAttributes for general derivation; here we need to add the parts of (eval(pos) - attr)^2 that depend on attr
	for (size_t k = 0; k < attribute_count; ++k)
	{
		float a = va[k];
		float g = v.x * G[k].gx + v.y * G[k].gy + v.z * G[k].gz + G[k].gw;

		// a^2*w - 2*a*g completes the square against the gradient terms already in Q
		r += a * (a * Q.w - 2 * g);
	}

	// note: unlike position error, we do not normalize by Q.w to retain edge scaling as described in quadricFromAttributes
	return fabsf(r);
}
  652. static void quadricFromPlane(Quadric& Q, float a, float b, float c, float d, float w)
  653. {
  654. float aw = a * w;
  655. float bw = b * w;
  656. float cw = c * w;
  657. float dw = d * w;
  658. Q.a00 = a * aw;
  659. Q.a11 = b * bw;
  660. Q.a22 = c * cw;
  661. Q.a10 = a * bw;
  662. Q.a20 = a * cw;
  663. Q.a21 = b * cw;
  664. Q.b0 = a * dw;
  665. Q.b1 = b * dw;
  666. Q.b2 = c * dw;
  667. Q.c = d * dw;
  668. Q.w = w;
  669. }
  670. static void quadricFromPoint(Quadric& Q, float x, float y, float z, float w)
  671. {
  672. Q.a00 = Q.a11 = Q.a22 = w;
  673. Q.a10 = Q.a20 = Q.a21 = 0;
  674. Q.b0 = -x * w;
  675. Q.b1 = -y * w;
  676. Q.b2 = -z * w;
  677. Q.c = (x * x + y * y + z * z) * w;
  678. Q.w = w;
  679. }
  680. static void quadricFromTriangle(Quadric& Q, const Vector3& p0, const Vector3& p1, const Vector3& p2, float weight)
  681. {
  682. Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};
  683. Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};
  684. // normal = cross(p1 - p0, p2 - p0)
  685. Vector3 normal = {p10.y * p20.z - p10.z * p20.y, p10.z * p20.x - p10.x * p20.z, p10.x * p20.y - p10.y * p20.x};
  686. float area = normalize(normal);
  687. float distance = normal.x * p0.x + normal.y * p0.y + normal.z * p0.z;
  688. // we use sqrtf(area) so that the error is scaled linearly; this tends to improve silhouettes
  689. quadricFromPlane(Q, normal.x, normal.y, normal.z, -distance, sqrtf(area) * weight);
  690. }
// Builds a quadric that penalizes movement away from the edge p0-p1: the plane passes through
// the edge, with its normal lying in the triangle plane (pointing away from p2).
// Weighted by edge length * weight so that it matches the triangle quadric scaling.
static void quadricFromTriangleEdge(Quadric& Q, const Vector3& p0, const Vector3& p1, const Vector3& p2, float weight)
{
	Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};

	// edge length; keep squared length around for projection correction
	float lengthsq = p10.x * p10.x + p10.y * p10.y + p10.z * p10.z;
	float length = sqrtf(lengthsq);

	// p20p = length of projection of p2-p0 onto p1-p0; note that p10 is unnormalized so we need to correct it later
	Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};
	float p20p = p20.x * p10.x + p20.y * p10.y + p20.z * p10.z;

	// perp = perpendicular vector from p2 to line segment p1-p0
	// note: since p10 is unnormalized we need to correct the projection; we scale p20 instead to take advantage of normalize below
	Vector3 perp = {p20.x * lengthsq - p10.x * p20p, p20.y * lengthsq - p10.y * p20p, p20.z * lengthsq - p10.z * p20p};
	normalize(perp);

	float distance = perp.x * p0.x + perp.y * p0.y + perp.z * p0.z;

	// note: the weight is scaled linearly with edge length; this has to match the triangle weight
	quadricFromPlane(Q, perp.x, perp.y, perp.z, -distance, length * weight);
}
// Builds an attribute quadric Q and per-attribute gradients G for the triangle (p0, p1, p2)
// with per-corner attribute arrays va0/va1/va2 (attribute_count values each).
static void quadricFromAttributes(Quadric& Q, QuadricGrad* G, const Vector3& p0, const Vector3& p1, const Vector3& p2, const float* va0, const float* va1, const float* va2, size_t attribute_count)
{
	// for each attribute we want to encode the following function into the quadric:
	// (eval(pos) - attr)^2
	// where eval(pos) interpolates attribute across the triangle like so:
	// eval(pos) = pos.x * gx + pos.y * gy + pos.z * gz + gw
	// where gx/gy/gz/gw are gradients
	Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};
	Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};

	// normal = cross(p1 - p0, p2 - p0)
	Vector3 normal = {p10.y * p20.z - p10.z * p20.y, p10.z * p20.x - p10.x * p20.z, p10.x * p20.y - p10.y * p20.x};
	float area = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z) * 0.5f;

	// quadric is weighted with the square of edge length (= area)
	// this equalizes the units with the positional error (which, after normalization, is a square of distance)
	// as a result, a change in weighted attribute of 1 along distance d is approximately equivalent to a change in position of d
	float w = area;

	// we compute gradients using barycentric coordinates; barycentric coordinates can be computed as follows:
	// v = (d11 * d20 - d01 * d21) / denom
	// w = (d00 * d21 - d01 * d20) / denom
	// u = 1 - v - w
	// here v0, v1 are triangle edge vectors, v2 is a vector from point to triangle corner, and dij = dot(vi, vj)
	// note: v2 and d20/d21 can not be evaluated here as v2 is effectively an unknown variable; we need these only as variables for derivation of gradients
	const Vector3& v0 = p10;
	const Vector3& v1 = p20;
	float d00 = v0.x * v0.x + v0.y * v0.y + v0.z * v0.z;
	float d01 = v0.x * v1.x + v0.y * v1.y + v0.z * v1.z;
	float d11 = v1.x * v1.x + v1.y * v1.y + v1.z * v1.z;
	float denom = d00 * d11 - d01 * d01;
	float denomr = denom == 0 ? 0.f : 1.f / denom;

	// precompute gradient factors
	// these are derived by directly computing derivative of eval(pos) = a0 * u + a1 * v + a2 * w and factoring out expressions that are shared between attributes
	float gx1 = (d11 * v0.x - d01 * v1.x) * denomr;
	float gx2 = (d00 * v1.x - d01 * v0.x) * denomr;
	float gy1 = (d11 * v0.y - d01 * v1.y) * denomr;
	float gy2 = (d00 * v1.y - d01 * v0.y) * denomr;
	float gz1 = (d11 * v0.z - d01 * v1.z) * denomr;
	float gz2 = (d00 * v1.z - d01 * v0.z) * denomr;

	memset(&Q, 0, sizeof(Quadric));

	Q.w = w;

	for (size_t k = 0; k < attribute_count; ++k)
	{
		float a0 = va0[k], a1 = va1[k], a2 = va2[k];

		// compute gradient of eval(pos) for x/y/z/w
		// the formulas below are obtained by directly computing derivative of eval(pos) = a0 * u + a1 * v + a2 * w
		float gx = gx1 * (a1 - a0) + gx2 * (a2 - a0);
		float gy = gy1 * (a1 - a0) + gy2 * (a2 - a0);
		float gz = gz1 * (a1 - a0) + gz2 * (a2 - a0);
		float gw = a0 - p0.x * gx - p0.y * gy - p0.z * gz;

		// quadric encodes (eval(pos)-attr)^2; this means that the resulting expansion needs to compute, for example, pos.x * pos.y * K
		// since quadrics already encode factors for pos.x * pos.y, we can accumulate almost everything in basic quadric fields
		// note: for simplicity we scale all factors by weight here instead of outside the loop
		Q.a00 += w * (gx * gx);
		Q.a11 += w * (gy * gy);
		Q.a22 += w * (gz * gz);

		Q.a10 += w * (gy * gx);
		Q.a20 += w * (gz * gx);
		Q.a21 += w * (gz * gy);

		Q.b0 += w * (gx * gw);
		Q.b1 += w * (gy * gw);
		Q.b2 += w * (gz * gw);

		Q.c += w * (gw * gw);

		// the only remaining sum components are ones that depend on attr; these will be added during error evaluation, see quadricError
		G[k].gx = w * gx;
		G[k].gy = w * gy;
		G[k].gz = w * gz;
		G[k].gw = w * gw;
	}
}
// Computes a linear functional G over position: area-scaled signed distance from the triangle's plane.
// Used by quadricSolve as a linear (volume preservation) constraint on the solved position.
static void quadricVolumeGradient(QuadricGrad& G, const Vector3& p0, const Vector3& p1, const Vector3& p2)
{
	Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};
	Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};

	// normal = cross(p1 - p0, p2 - p0); normalize returns the pre-normalization length (twice the area)
	Vector3 normal = {p10.y * p20.z - p10.z * p20.y, p10.z * p20.x - p10.x * p20.z, p10.x * p20.y - p10.y * p20.x};
	float area = normalize(normal) * 0.5f;

	G.gx = normal.x * area;
	G.gy = normal.y * area;
	G.gz = normal.z * area;
	// gw makes G evaluate to zero at any point on the triangle's plane
	G.gw = (-p0.x * normal.x - p0.y * normal.y - p0.z * normal.z) * area;
}
// Finds the point p that minimizes the quadric error subject to the linear constraint GV(p) = 0,
// using LDL^T factorization of the 3x3 system augmented with a Lagrange multiplier row.
// Returns false when the system is ill-conditioned (any pivot near zero); p is written regardless.
static bool quadricSolve(Vector3& p, const Quadric& Q, const QuadricGrad& GV)
{
	// solve A*p = -b where A is the quadric matrix and b is the linear term
	float a00 = Q.a00, a11 = Q.a11, a22 = Q.a22;
	float a10 = Q.a10, a20 = Q.a20, a21 = Q.a21;
	float x0 = -Q.b0, x1 = -Q.b1, x2 = -Q.b2;

	// conditioning threshold scales with the accumulated quadric weight
	float eps = 1e-6f * Q.w;

	// LDL decomposition: A = LDL^T
	float d0 = a00;
	float l10 = a10 / d0;
	float l20 = a20 / d0;
	float d1 = a11 - a10 * l10;
	float dl21 = a21 - a20 * l10;
	float l21 = dl21 / d1;
	float d2 = a22 - a20 * l20 - dl21 * l21;

	// solve L*y = x
	float y0 = x0;
	float y1 = x1 - l10 * y0;
	float y2 = x2 - l20 * y0 - l21 * y1;

	// solve D*z = y
	float z0 = y0 / d0;
	float z1 = y1 / d1;
	float z2 = y2 / d2;

	// augment system with linear constraint GV using Lagrange multiplier
	float a30 = GV.gx, a31 = GV.gy, a32 = GV.gz;
	float x3 = -GV.gw;

	float l30 = a30 / d0;
	float dl31 = a31 - a30 * l10;
	float l31 = dl31 / d1;
	float dl32 = a32 - a30 * l20 - dl31 * l21;
	float l32 = dl32 / d2;
	float d3 = 0.f - a30 * l30 - dl31 * l31 - dl32 * l32;

	float y3 = x3 - l30 * y0 - l31 * y1 - l32 * y2;
	float z3 = fabsf(d3) > eps ? y3 / d3 : 0.f; // if d3 is zero, we can ignore the constraint

	// substitute L^T*p = z
	float lambda = z3;
	float pz = z2 - l32 * lambda;
	float py = z1 - l21 * pz - l31 * lambda;
	float px = z0 - l10 * py - l20 * pz - l30 * lambda;

	p.x = px;
	p.y = py;
	p.z = pz;

	return fabsf(d0) > eps && fabsf(d1) > eps && fabsf(d2) > eps;
}
// Folds attribute quadric A (with gradients G) into position quadric Q so that positions can be
// solved with attribute influence, without carrying the attribute unknowns in the system.
static void quadricReduceAttributes(Quadric& Q, const Quadric& A, const QuadricGrad* G, size_t attribute_count)
{
	// update vertex quadric with attribute quadric; multiply by vertex weight to minimize normalized error
	Q.a00 += A.a00 * Q.w;
	Q.a11 += A.a11 * Q.w;
	Q.a22 += A.a22 * Q.w;
	Q.a10 += A.a10 * Q.w;
	Q.a20 += A.a20 * Q.w;
	Q.a21 += A.a21 * Q.w;

	Q.b0 += A.b0 * Q.w;
	Q.b1 += A.b1 * Q.w;
	Q.b2 += A.b2 * Q.w;

	// combined scale: Q.w to match the weighting above, 1/A.w to normalize by attribute weight
	float iaw = A.w == 0 ? 0.f : Q.w / A.w;

	// update linear system based on attribute gradients (BB^T/a)
	for (size_t k = 0; k < attribute_count; ++k)
	{
		const QuadricGrad& g = G[k];

		Q.a00 -= (g.gx * g.gx) * iaw;
		Q.a11 -= (g.gy * g.gy) * iaw;
		Q.a22 -= (g.gz * g.gz) * iaw;
		Q.a10 -= (g.gx * g.gy) * iaw;
		Q.a20 -= (g.gx * g.gz) * iaw;
		Q.a21 -= (g.gy * g.gz) * iaw;

		Q.b0 -= (g.gx * g.gw) * iaw;
		Q.b1 -= (g.gy * g.gw) * iaw;
		Q.b2 -= (g.gz * g.gw) * iaw;
	}
}
  860. static void fillFaceQuadrics(Quadric* vertex_quadrics, QuadricGrad* volume_gradients, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* remap)
  861. {
  862. for (size_t i = 0; i < index_count; i += 3)
  863. {
  864. unsigned int i0 = indices[i + 0];
  865. unsigned int i1 = indices[i + 1];
  866. unsigned int i2 = indices[i + 2];
  867. Quadric Q;
  868. quadricFromTriangle(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], 1.f);
  869. quadricAdd(vertex_quadrics[remap[i0]], Q);
  870. quadricAdd(vertex_quadrics[remap[i1]], Q);
  871. quadricAdd(vertex_quadrics[remap[i2]], Q);
  872. if (volume_gradients)
  873. {
  874. QuadricGrad GV;
  875. quadricVolumeGradient(GV, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2]);
  876. quadricAdd(volume_gradients[remap[i0]], GV);
  877. quadricAdd(volume_gradients[remap[i1]], GV);
  878. quadricAdd(volume_gradients[remap[i2]], GV);
  879. }
  880. }
  881. }
  882. static void fillVertexQuadrics(Quadric* vertex_quadrics, const Vector3* vertex_positions, size_t vertex_count, const unsigned int* remap, unsigned int options)
  883. {
  884. // by default, we use a very small weight to improve triangulation and numerical stability without affecting the shape or error
  885. float factor = (options & meshopt_SimplifyRegularize) ? 1e-1f : 1e-7f;
  886. for (size_t i = 0; i < vertex_count; ++i)
  887. {
  888. if (remap[i] != i)
  889. continue;
  890. const Vector3& p = vertex_positions[i];
  891. float w = vertex_quadrics[i].w * factor;
  892. Quadric Q;
  893. quadricFromPoint(Q, p.x, p.y, p.z, w);
  894. quadricAdd(vertex_quadrics[i], Q);
  895. }
  896. }
// Adds edge-preservation quadrics for border/seam edges so that collapses keep boundary geometry.
static void fillEdgeQuadrics(Quadric* vertex_quadrics, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* remap, const unsigned char* vertex_kind, const unsigned int* loop, const unsigned int* loopback)
{
	for (size_t i = 0; i < index_count; i += 3)
	{
		// next[] has 4 entries so that next[e + 1] below can fetch the third triangle corner
		static const int next[4] = {1, 2, 0, 1};

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];

			unsigned char k0 = vertex_kind[i0];
			unsigned char k1 = vertex_kind[i1];

			// check that either i0 or i1 are border/seam and are on the same edge loop
			// note that we need to add the error even for edges that connect e.g. border & locked
			// if we don't do that, the adjacent border->border edge won't have correct errors for corners
			if (k0 != Kind_Border && k0 != Kind_Seam && k1 != Kind_Border && k1 != Kind_Seam)
				continue;

			if ((k0 == Kind_Border || k0 == Kind_Seam) && loop[i0] != i1)
				continue;

			if ((k1 == Kind_Border || k1 == Kind_Seam) && loopback[i1] != i0)
				continue;

			unsigned int i2 = indices[i + next[e + 1]];

			// we try hard to maintain border edge geometry; seam edges can move more freely
			// due to topological restrictions on collapses, seam quadrics slightly improves collapse structure but aren't critical
			const float kEdgeWeightSeam = 0.5f; // applied twice due to opposite edges
			const float kEdgeWeightBorder = 10.f;

			float edgeWeight = (k0 == Kind_Border || k1 == Kind_Border) ? kEdgeWeightBorder : kEdgeWeightSeam;

			Quadric Q;
			quadricFromTriangleEdge(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], edgeWeight);

			Quadric QT;
			quadricFromTriangle(QT, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], edgeWeight);

			// mix edge quadric with triangle quadric to stabilize collapses in both directions; both quadrics inherit edge weight so that their error is added
			QT.w = 0;
			quadricAdd(Q, QT);

			quadricAdd(vertex_quadrics[remap[i0]], Q);
			quadricAdd(vertex_quadrics[remap[i1]], Q);
		}
	}
}
  935. static void fillAttributeQuadrics(Quadric* attribute_quadrics, QuadricGrad* attribute_gradients, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const float* vertex_attributes, size_t attribute_count)
  936. {
  937. for (size_t i = 0; i < index_count; i += 3)
  938. {
  939. unsigned int i0 = indices[i + 0];
  940. unsigned int i1 = indices[i + 1];
  941. unsigned int i2 = indices[i + 2];
  942. Quadric QA;
  943. QuadricGrad G[kMaxAttributes];
  944. quadricFromAttributes(QA, G, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], &vertex_attributes[i0 * attribute_count], &vertex_attributes[i1 * attribute_count], &vertex_attributes[i2 * attribute_count], attribute_count);
  945. quadricAdd(attribute_quadrics[i0], QA);
  946. quadricAdd(attribute_quadrics[i1], QA);
  947. quadricAdd(attribute_quadrics[i2], QA);
  948. quadricAdd(&attribute_gradients[i0 * attribute_count], G, attribute_count);
  949. quadricAdd(&attribute_gradients[i1 * attribute_count], G, attribute_count);
  950. quadricAdd(&attribute_gradients[i2 * attribute_count], G, attribute_count);
  951. }
  952. }
// does triangle ABC flip when C is replaced with D?
static bool hasTriangleFlip(const Vector3& a, const Vector3& b, const Vector3& c, const Vector3& d)
{
	Vector3 eb = {b.x - a.x, b.y - a.y, b.z - a.z};
	Vector3 ec = {c.x - a.x, c.y - a.y, c.z - a.z};
	Vector3 ed = {d.x - a.x, d.y - a.y, d.z - a.z};

	// unnormalized normals of ABC and ABD
	Vector3 nbc = {eb.y * ec.z - eb.z * ec.y, eb.z * ec.x - eb.x * ec.z, eb.x * ec.y - eb.y * ec.x};
	Vector3 nbd = {eb.y * ed.z - eb.z * ed.y, eb.z * ed.x - eb.x * ed.z, eb.x * ed.y - eb.y * ed.x};

	// dot product and squared lengths of the two normals
	float ndp = nbc.x * nbd.x + nbc.y * nbd.y + nbc.z * nbd.z;
	float abc = nbc.x * nbc.x + nbc.y * nbc.y + nbc.z * nbc.z;
	float abd = nbd.x * nbd.x + nbd.y * nbd.y + nbd.z * nbd.z;

	// scale is cos(angle); somewhat arbitrarily set to ~75 degrees
	// note that the "pure" check is ndp <= 0 (90 degree cutoff) but that allows flipping through a series of close-to-90 collapses
	return ndp <= 0.25f * sqrtf(abc * abd);
}
// Returns true if collapsing i0 onto i1 would flip any triangle adjacent to i0.
// collapse_remap reflects collapses already performed this pass; i0 and i1 must be uncollapsed.
static bool hasTriangleFlips(const EdgeAdjacency& adjacency, const Vector3* vertex_positions, const unsigned int* collapse_remap, unsigned int i0, unsigned int i1)
{
	assert(collapse_remap[i0] == i0);
	assert(collapse_remap[i1] == i1);

	const Vector3& v0 = vertex_positions[i0];
	const Vector3& v1 = vertex_positions[i1];

	const EdgeAdjacency::Edge* edges = &adjacency.data[adjacency.offsets[i0]];
	size_t count = adjacency.offsets[i0 + 1] - adjacency.offsets[i0];

	for (size_t i = 0; i < count; ++i)
	{
		unsigned int a = collapse_remap[edges[i].next];
		unsigned int b = collapse_remap[edges[i].prev];

		// skip triangles that will get collapsed by i0->i1 collapse or already got collapsed previously
		if (a == i1 || b == i1 || a == b)
			continue;

		// early-out when at least one triangle flips due to a collapse
		if (hasTriangleFlip(vertex_positions[a], vertex_positions[b], v0, v1))
		{
#if TRACE >= 2
			printf("edge block %d -> %d: flip welded %d %d %d\n", i0, i1, a, i0, b);
#endif

			return true;
		}
	}

	return false;
}
  994. static bool hasTriangleFlips(const EdgeAdjacency& adjacency, const Vector3* vertex_positions, unsigned int i0, const Vector3& v1)
  995. {
  996. const Vector3& v0 = vertex_positions[i0];
  997. const EdgeAdjacency::Edge* edges = &adjacency.data[adjacency.offsets[i0]];
  998. size_t count = adjacency.offsets[i0 + 1] - adjacency.offsets[i0];
  999. for (size_t i = 0; i < count; ++i)
  1000. {
  1001. unsigned int a = edges[i].next, b = edges[i].prev;
  1002. if (hasTriangleFlip(vertex_positions[a], vertex_positions[b], v0, v1))
  1003. return true;
  1004. }
  1005. return false;
  1006. }
  1007. static float getNeighborhoodRadius(const EdgeAdjacency& adjacency, const Vector3* vertex_positions, unsigned int i0)
  1008. {
  1009. const Vector3& v0 = vertex_positions[i0];
  1010. const EdgeAdjacency::Edge* edges = &adjacency.data[adjacency.offsets[i0]];
  1011. size_t count = adjacency.offsets[i0 + 1] - adjacency.offsets[i0];
  1012. float result = 0.f;
  1013. for (size_t i = 0; i < count; ++i)
  1014. {
  1015. unsigned int a = edges[i].next, b = edges[i].prev;
  1016. const Vector3& va = vertex_positions[a];
  1017. const Vector3& vb = vertex_positions[b];
  1018. float da = (va.x - v0.x) * (va.x - v0.x) + (va.y - v0.y) * (va.y - v0.y) + (va.z - v0.z) * (va.z - v0.z);
  1019. float db = (vb.x - v0.x) * (vb.x - v0.x) + (vb.y - v0.y) * (vb.y - v0.y) + (vb.z - v0.z) * (vb.z - v0.z);
  1020. result = result < da ? da : result;
  1021. result = result < db ? db : result;
  1022. }
  1023. return sqrtf(result);
  1024. }
  1025. static unsigned int getComplexTarget(unsigned int v, unsigned int target, const unsigned int* remap, const unsigned int* loop, const unsigned int* loopback)
  1026. {
  1027. unsigned int r = remap[target];
  1028. // use loop metadata to guide complex collapses towards the correct wedge
  1029. // this works for edges on attribute discontinuities because loop/loopback track the single half-edge without a pair, similar to seams
  1030. if (loop[v] != ~0u && remap[loop[v]] == r)
  1031. return loop[v];
  1032. else if (loopback[v] != ~0u && remap[loopback[v]] == r)
  1033. return loopback[v];
  1034. else
  1035. return target;
  1036. }
  1037. static size_t boundEdgeCollapses(const EdgeAdjacency& adjacency, size_t vertex_count, size_t index_count, unsigned char* vertex_kind)
  1038. {
  1039. size_t dual_count = 0;
  1040. for (size_t i = 0; i < vertex_count; ++i)
  1041. {
  1042. unsigned char k = vertex_kind[i];
  1043. unsigned int e = adjacency.offsets[i + 1] - adjacency.offsets[i];
  1044. dual_count += (k == Kind_Manifold || k == Kind_Seam) ? e : 0;
  1045. }
  1046. assert(dual_count <= index_count);
  1047. // pad capacity by 3 so that we can check for overflow once per triangle instead of once per edge
  1048. return (index_count - dual_count / 2) + 3;
  1049. }
// Scans all triangle edges and emits collapse candidates that are topologically permissible,
// tagging each as uni- or bi-directional. Returns the number of candidates written.
static size_t pickEdgeCollapses(Collapse* collapses, size_t collapse_capacity, const unsigned int* indices, size_t index_count, const unsigned int* remap, const unsigned char* vertex_kind, const unsigned int* loop, const unsigned int* loopback)
{
	size_t collapse_count = 0;

	for (size_t i = 0; i < index_count; i += 3)
	{
		static const int next[3] = {1, 2, 0};

		// this should never happen as boundEdgeCollapses should give an upper bound for the collapse count, but in an unlikely event it does we can just drop extra collapses
		if (collapse_count + 3 > collapse_capacity)
			break;

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];

			// this can happen either when input has a zero-length edge, or when we perform collapses for complex
			// topology w/seams and collapse a manifold vertex that connects to both wedges onto one of them
			// we leave edges like this alone since they may be important for preserving mesh integrity
			if (remap[i0] == remap[i1])
				continue;

			unsigned char k0 = vertex_kind[i0];
			unsigned char k1 = vertex_kind[i1];

			// the edge has to be collapsible in at least one direction
			if (!(kCanCollapse[k0][k1] | kCanCollapse[k1][k0]))
				continue;

			// manifold and seam edges should occur twice (i0->i1 and i1->i0) - skip redundant edges
			if (kHasOpposite[k0][k1] && remap[i1] > remap[i0])
				continue;

			// two vertices are on a border or a seam, but there's no direct edge between them
			// this indicates that they belong to two different edge loops and we should not collapse this edge
			// loop[] and loopback[] track half edges so we only need to check one of them
			if ((k0 == Kind_Border || k0 == Kind_Seam) && k1 != Kind_Manifold && loop[i0] != i1)
				continue;

			if ((k1 == Kind_Border || k1 == Kind_Seam) && k0 != Kind_Manifold && loopback[i1] != i0)
				continue;

			// edge can be collapsed in either direction - we will pick the one with minimum error
			// note: we evaluate error later during collapse ranking, here we just tag the edge as bidirectional
			if (kCanCollapse[k0][k1] & kCanCollapse[k1][k0])
			{
				Collapse c = {i0, i1, {/* bidi= */ 1}};
				collapses[collapse_count++] = c;
			}
			else
			{
				// edge can only be collapsed in one direction
				unsigned int e0 = kCanCollapse[k0][k1] ? i0 : i1;
				unsigned int e1 = kCanCollapse[k0][k1] ? i1 : i0;

				Collapse c = {e0, e1, {/* bidi= */ 0}};
				collapses[collapse_count++] = c;
			}
		}
	}

	return collapse_count;
}
// Computes the error of each collapse candidate (position + attribute quadrics, aggregating
// seam pairs and complex wedges) and, for bidirectional candidates, orients the collapse
// towards the direction with the smaller error. Overwrites the bidi field with the error.
static void rankEdgeCollapses(Collapse* collapses, size_t collapse_count, const Vector3* vertex_positions, const float* vertex_attributes, const Quadric* vertex_quadrics, const Quadric* attribute_quadrics, const QuadricGrad* attribute_gradients, size_t attribute_count, const unsigned int* remap, const unsigned int* wedge, const unsigned char* vertex_kind, const unsigned int* loop, const unsigned int* loopback)
{
	for (size_t i = 0; i < collapse_count; ++i)
	{
		Collapse& c = collapses[i];

		unsigned int i0 = c.v0;
		unsigned int i1 = c.v1;

		bool bidi = c.bidi;

		// ei is the error of collapsing i0 onto i1; ej is the reverse direction (bidi only)
		float ei = quadricError(vertex_quadrics[remap[i0]], vertex_positions[i1]);
		float ej = bidi ? quadricError(vertex_quadrics[remap[i1]], vertex_positions[i0]) : FLT_MAX;

#if TRACE >= 3
		float di = ei, dj = ej;
#endif

		if (attribute_count)
		{
			ei += quadricError(attribute_quadrics[i0], &attribute_gradients[i0 * attribute_count], attribute_count, vertex_positions[i1], &vertex_attributes[i1 * attribute_count]);
			ej += bidi ? quadricError(attribute_quadrics[i1], &attribute_gradients[i1 * attribute_count], attribute_count, vertex_positions[i0], &vertex_attributes[i0 * attribute_count]) : 0;

			// seam edges need to aggregate attribute errors between primary and secondary edges, as attribute quadrics are separate
			if (vertex_kind[i0] == Kind_Seam)
			{
				// for seam collapses we need to find the seam pair; this is a bit tricky since we need to rely on edge loops as target vertex may be locked (and thus have more than two wedges)
				unsigned int s0 = wedge[i0];
				unsigned int s1 = loop[i0] == i1 ? loopback[s0] : loop[s0];

				assert(wedge[s0] == i0); // s0 may be equal to i0 for half-seams
				assert(s1 != ~0u && remap[s1] == remap[i1]);

				// note: this should never happen due to the assertion above, but when disabled if we ever hit this case we'll get a memory safety issue; for now play it safe
				s1 = (s1 != ~0u) ? s1 : wedge[i1];

				ei += quadricError(attribute_quadrics[s0], &attribute_gradients[s0 * attribute_count], attribute_count, vertex_positions[s1], &vertex_attributes[s1 * attribute_count]);
				ej += bidi ? quadricError(attribute_quadrics[s1], &attribute_gradients[s1 * attribute_count], attribute_count, vertex_positions[s0], &vertex_attributes[s0 * attribute_count]) : 0;
			}
			else
			{
				// complex edges can have multiple wedges, so we need to aggregate errors for all wedges based on the selected target
				if (vertex_kind[i0] == Kind_Complex)
					for (unsigned int v = wedge[i0]; v != i0; v = wedge[v])
					{
						unsigned int t = getComplexTarget(v, i1, remap, loop, loopback);

						ei += quadricError(attribute_quadrics[v], &attribute_gradients[v * attribute_count], attribute_count, vertex_positions[t], &vertex_attributes[t * attribute_count]);
					}

				if (vertex_kind[i1] == Kind_Complex && bidi)
					for (unsigned int v = wedge[i1]; v != i1; v = wedge[v])
					{
						unsigned int t = getComplexTarget(v, i0, remap, loop, loopback);

						ej += quadricError(attribute_quadrics[v], &attribute_gradients[v * attribute_count], attribute_count, vertex_positions[t], &vertex_attributes[t * attribute_count]);
					}
			}
		}

		// pick edge direction with minimal error (branchless)
		bool rev = bidi & (ej < ei);

		c.v0 = rev ? i1 : i0;
		c.v1 = rev ? i0 : i1;
		c.error = ej < ei ? ej : ei;

#if TRACE >= 3
		if (bidi)
			printf("edge eval %d -> %d: error %f (pos %f, attr %f); reverse %f (pos %f, attr %f)\n",
			    rev ? i1 : i0, rev ? i0 : i1,
			    sqrtf(rev ? ej : ei), sqrtf(rev ? dj : di), sqrtf(rev ? ej - dj : ei - di),
			    sqrtf(rev ? ei : ej), sqrtf(rev ? di : dj), sqrtf(rev ? ei - di : ej - dj));
		else
			printf("edge eval %d -> %d: error %f (pos %f, attr %f)\n", i0, i1, sqrtf(c.error), sqrtf(di), sqrtf(ei - di));
#endif
	}
}
// Produces sort_order: indices of collapses ordered by ascending error, via a two-pass counting sort.
static void sortEdgeCollapses(unsigned int* sort_order, const Collapse* collapses, size_t collapse_count)
{
	// we use counting sort to order collapses by error; since the exact sort order is not as critical,
	// only top 12 bits of exponent+mantissa (8 bits of exponent and 4 bits of mantissa) are used.
	// to avoid excessive stack usage, we clamp the exponent range as collapses with errors much higher than 1 are not useful.
	const unsigned int sort_bits = 12;
	const unsigned int sort_bins = 2048 + 512; // exponent range [-127, 32)

	// fill histogram for counting sort
	unsigned int histogram[sort_bins];
	memset(histogram, 0, sizeof(histogram));

	for (size_t i = 0; i < collapse_count; ++i)
	{
		// skip sign bit since error is non-negative
		unsigned int error = collapses[i].errorui;
		unsigned int key = (error << 1) >> (32 - sort_bits);
		// clamp out-of-range keys into the last bin
		key = key < sort_bins ? key : sort_bins - 1;

		histogram[key]++;
	}

	// compute offsets based on histogram data
	size_t histogram_sum = 0;

	for (size_t i = 0; i < sort_bins; ++i)
	{
		size_t count = histogram[i];
		histogram[i] = unsigned(histogram_sum);
		histogram_sum += count;
	}

	assert(histogram_sum == collapse_count);

	// compute sort order based on offsets
	for (size_t i = 0; i < collapse_count; ++i)
	{
		// skip sign bit since error is non-negative
		unsigned int error = collapses[i].errorui;
		unsigned int key = (error << 1) >> (32 - sort_bits);
		key = key < sort_bins ? key : sort_bins - 1;

		sort_order[histogram[key]++] = unsigned(i);
	}
}
// Executes collapses in increasing-error order until an error or triangle-count limit is hit.
// Writes per-vertex move targets into collapse_remap, marks both endpoints of each committed
// collapse in collapse_locked, raises result_error to the largest committed collapse error,
// and returns the number of edges collapsed in this pass.
static size_t performEdgeCollapses(unsigned int* collapse_remap, unsigned char* collapse_locked, const Collapse* collapses, size_t collapse_count, const unsigned int* collapse_order, const unsigned int* remap, const unsigned int* wedge, const unsigned char* vertex_kind, const unsigned int* loop, const unsigned int* loopback, const Vector3* vertex_positions, const EdgeAdjacency& adjacency, size_t triangle_collapse_goal, float error_limit, float& result_error)
{
	size_t edge_collapses = 0;
	size_t triangle_collapses = 0;

	// most collapses remove 2 triangles; use this to establish a bound on the pass in terms of error limit
	// note that edge_collapse_goal is an estimate; triangle_collapse_goal will be used to actually limit collapses
	size_t edge_collapse_goal = triangle_collapse_goal / 2;

#if TRACE
	size_t stats[7] = {};
#endif

	for (size_t i = 0; i < collapse_count; ++i)
	{
		const Collapse& c = collapses[collapse_order[i]];

		TRACESTATS(0);

		// collapses are visited in sorted order, so once the error limit is exceeded we can stop
		if (c.error > error_limit)
		{
			TRACESTATS(4);
			break;
		}

		if (triangle_collapses >= triangle_collapse_goal)
		{
			TRACESTATS(5);
			break;
		}

		// we limit the error in each pass based on the error of optimal last collapse; since many collapses will be locked
		// as they will share vertices with other successful collapses, we need to increase the acceptable error by some factor
		float error_goal = edge_collapse_goal < collapse_count ? 1.5f * collapses[collapse_order[edge_collapse_goal]].error : FLT_MAX;

		// on average, each collapse is expected to lock 6 other collapses; to avoid degenerate passes on meshes with odd
		// topology, we only abort if we got over 1/6 collapses accordingly.
		if (c.error > error_goal && c.error > result_error && triangle_collapses > triangle_collapse_goal / 6)
		{
			TRACESTATS(6);
			break;
		}

		unsigned int i0 = c.v0;
		unsigned int i1 = c.v1;

		unsigned int r0 = remap[i0];
		unsigned int r1 = remap[i1];

		unsigned char kind = vertex_kind[i0];

		// we don't collapse vertices that had source or target vertex involved in a collapse
		// it's important to not move the vertices twice since it complicates the tracking/remapping logic
		// it's important to not move other vertices towards a moved vertex to preserve error since we don't re-rank collapses mid-pass
		if (collapse_locked[r0] | collapse_locked[r1])
		{
			TRACESTATS(1);
			continue;
		}

		if (hasTriangleFlips(adjacency, vertex_positions, collapse_remap, r0, r1))
		{
			// adjust collapse goal since this collapse is invalid and shouldn't factor into error goal
			edge_collapse_goal++;

			TRACESTATS(2);
			continue;
		}

#if TRACE >= 2
		printf("edge commit %d -> %d: kind %d->%d, error %f\n", i0, i1, vertex_kind[i0], vertex_kind[i1], sqrtf(c.error));
#endif

		// neither endpoint may have been moved earlier in this pass
		assert(collapse_remap[r0] == r0);
		assert(collapse_remap[r1] == r1);

		if (kind == Kind_Complex)
		{
			// remap all vertices in the complex to the target vertex
			unsigned int v = i0;

			do
			{
				unsigned int t = getComplexTarget(v, i1, remap, loop, loopback);
				collapse_remap[v] = t;
				v = wedge[v];
			} while (v != i0);
		}
		else if (kind == Kind_Seam)
		{
			// for seam collapses we need to move the seam pair together; this is a bit tricky since we need to rely on edge loops as target vertex may be locked (and thus have more than two wedges)
			unsigned int s0 = wedge[i0];
			unsigned int s1 = loop[i0] == i1 ? loopback[s0] : loop[s0];
			assert(wedge[s0] == i0); // s0 may be equal to i0 for half-seams
			assert(s1 != ~0u && remap[s1] == r1);

			// additional asserts to verify that the seam pair is consistent
			assert(kind != vertex_kind[i1] || s1 == wedge[i1]);
			assert(loop[i0] == i1 || loopback[i0] == i1);
			assert(loop[s0] == s1 || loopback[s0] == s1);

			// note: this should never happen due to the assertion above, but when disabled if we ever hit this case we'll get a memory safety issue; for now play it safe
			s1 = (s1 != ~0u) ? s1 : wedge[i1];

			collapse_remap[i0] = i1;
			collapse_remap[s0] = s1;
		}
		else
		{
			// manifold/border collapse: single wedge moves to the target
			assert(wedge[i0] == i0);

			collapse_remap[i0] = i1;
		}

		// note: we technically don't need to lock r1 if it's a locked vertex, as it can't move and its quadric won't be used
		// however, this results in slightly worse error on some meshes because the locked collapses get an unfair advantage wrt scheduling
		collapse_locked[r0] = 1;
		collapse_locked[r1] = 1;

		// border edges collapse 1 triangle, other edges collapse 2 or more
		triangle_collapses += (kind == Kind_Border) ? 1 : 2;
		edge_collapses++;

		result_error = result_error < c.error ? c.error : result_error;
	}

#if TRACE
	float error_goal_last = edge_collapse_goal < collapse_count ? 1.5f * collapses[collapse_order[edge_collapse_goal]].error : FLT_MAX;
	float error_goal_limit = error_goal_last < error_limit ? error_goal_last : error_limit;

	printf("removed %d triangles, error %e (goal %e); evaluated %d/%d collapses (done %d, skipped %d, invalid %d); %s\n",
	    int(triangle_collapses), sqrtf(result_error), sqrtf(error_goal_limit),
	    int(stats[0]), int(collapse_count), int(edge_collapses), int(stats[1]), int(stats[2]),
	    stats[4] ? "error limit" : (stats[5] ? "count limit" : (stats[6] ? "error goal" : "out of collapses")));
#endif

	return edge_collapses;
}
// After a collapse pass, folds the quadrics of every moved vertex into its collapse target,
// so that subsequent error evaluation accounts for the surface that was collapsed away.
// When attributes are in use, also raises vertex_error with the recomputed positional error.
static void updateQuadrics(const unsigned int* collapse_remap, size_t vertex_count, Quadric* vertex_quadrics, QuadricGrad* volume_gradients, Quadric* attribute_quadrics, QuadricGrad* attribute_gradients, size_t attribute_count, const Vector3* vertex_positions, const unsigned int* remap, float& vertex_error)
{
	for (size_t i = 0; i < vertex_count; ++i)
	{
		// vertex was not moved during this pass
		if (collapse_remap[i] == i)
			continue;

		unsigned int i0 = unsigned(i);
		unsigned int i1 = collapse_remap[i];

		unsigned int r0 = remap[i0];
		unsigned int r1 = remap[i1];

		// ensure we only update vertex_quadrics once: primary vertex must be moved if any wedge is moved
		if (i0 == r0)
		{
			quadricAdd(vertex_quadrics[r1], vertex_quadrics[r0]);

			if (volume_gradients)
				quadricAdd(volume_gradients[r1], volume_gradients[r0]);
		}

		// attribute quadrics/gradients are tracked per wedge, so they are merged for every moved wedge
		if (attribute_count)
		{
			quadricAdd(attribute_quadrics[i1], attribute_quadrics[i0]);
			quadricAdd(&attribute_gradients[i1 * attribute_count], &attribute_gradients[i0 * attribute_count], attribute_count);

			if (i0 == r0)
			{
				// when attributes are used, distance error needs to be recomputed as collapses don't track it; it is safe to do this after the quadric adjustment
				float derr = quadricError(vertex_quadrics[r0], vertex_positions[r1]);
				vertex_error = vertex_error < derr ? derr : vertex_error;
			}
		}
	}
}
// Moves each updatable vertex to the position that minimizes its accumulated quadric
// (with regularization and, when attributes are used, attribute/volume terms folded in).
// Candidate positions are rejected if the solve fails, the point moves outside the local
// neighborhood, any adjacent triangle flips, or the positional error grows too much.
static void solvePositions(Vector3* vertex_positions, size_t vertex_count, const Quadric* vertex_quadrics, const QuadricGrad* volume_gradients, const Quadric* attribute_quadrics, const QuadricGrad* attribute_gradients, size_t attribute_count, const unsigned int* remap, const unsigned int* wedge, const EdgeAdjacency& adjacency, const unsigned char* vertex_kind, const unsigned char* vertex_update)
{
#if TRACE
	size_t stats[6] = {};
#endif

	for (size_t i = 0; i < vertex_count; ++i)
	{
		if (!vertex_update[i])
			continue;

		// moving vertices on an attribute discontinuity may result in extrapolating UV outside of the chart bounds
		// moving vertices on a border requires a stronger edge quadric to preserve the border geometry
		if (vertex_kind[i] == Kind_Locked || vertex_kind[i] == Kind_Seam || vertex_kind[i] == Kind_Border)
			continue;

		// secondary wedges simply copy the position computed for their primary vertex
		if (remap[i] != i)
		{
			vertex_positions[i] = vertex_positions[remap[i]];
			continue;
		}

		TRACESTATS(0);

		const Vector3& vp = vertex_positions[i];
		Quadric Q = vertex_quadrics[i];
		QuadricGrad GV = {};

		// add a point quadric for regularization to stabilize the solution
		Quadric R;
		quadricFromPoint(R, vp.x, vp.y, vp.z, Q.w * 1e-4f);
		quadricAdd(Q, R);

		if (attribute_count)
		{
			// optimal point simultaneously minimizes attribute quadrics for all wedges
			unsigned int v = unsigned(i);

			do
			{
				quadricReduceAttributes(Q, attribute_quadrics[v], &attribute_gradients[v * attribute_count], attribute_count);
				v = wedge[v];
			} while (v != i);

			// minimizing attribute quadrics results in volume loss so we incorporate volume gradient as a constraint
			if (volume_gradients)
				GV = volume_gradients[i];
		}

		Vector3 p;
		if (!quadricSolve(p, Q, GV))
		{
			TRACESTATS(2);
			continue;
		}

		// reject updates that move the vertex too far from its neighborhood
		// this detects and fixes most cases when the quadric is not well-defined
		float nr = getNeighborhoodRadius(adjacency, vertex_positions, unsigned(i));
		float dp = (p.x - vp.x) * (p.x - vp.x) + (p.y - vp.y) * (p.y - vp.y) + (p.z - vp.z) * (p.z - vp.z);

		if (dp > nr * nr)
		{
			TRACESTATS(3);
			continue;
		}

		// reject updates that would flip a neighboring triangle, as we do for edge collapse
		if (hasTriangleFlips(adjacency, vertex_positions, unsigned(i), p))
		{
			TRACESTATS(4);
			continue;
		}

		// reject updates that increase positional error too much; allow some tolerance to improve attribute quality
		if (quadricError(vertex_quadrics[i], p) > quadricError(vertex_quadrics[i], vp) * 1.5f + 1e-6f)
		{
			TRACESTATS(5);
			continue;
		}

		TRACESTATS(1);
		vertex_positions[i] = p;
	}

#if TRACE
	printf("updated %d/%d positions; failed solve %d bounds %d flip %d error %d\n", int(stats[1]), int(stats[0]), int(stats[2]), int(stats[3]), int(stats[4]), int(stats[5]));
#endif
}
// Recomputes attribute values for updated vertices by evaluating the linear model stored
// in each wedge's attribute gradient at the (possibly moved) vertex position.
// Only primary vertices are processed; all their wedges are updated in the inner loop.
static void solveAttributes(Vector3* vertex_positions, float* vertex_attributes, size_t vertex_count, const Quadric* attribute_quadrics, const QuadricGrad* attribute_gradients, size_t attribute_count, const unsigned int* remap, const unsigned int* wedge, const unsigned char* vertex_kind, const unsigned char* vertex_update)
{
	for (size_t i = 0; i < vertex_count; ++i)
	{
		if (!vertex_update[i])
			continue;

		// secondary wedges are handled via the wedge loop of their primary vertex
		if (remap[i] != i)
			continue;

		for (size_t k = 0; k < attribute_count; ++k)
		{
			unsigned int shared = ~0u;

			// for complex vertices, preserve attribute continuity and use highest weight wedge if values were shared
			if (vertex_kind[i] == Kind_Complex)
			{
				shared = unsigned(i);

				// if any wedge disagrees on this attribute, values were not shared; otherwise
				// pick the wedge with the largest quadric weight as the source
				for (unsigned int v = wedge[i]; v != i; v = wedge[v])
					if (vertex_attributes[v * attribute_count + k] != vertex_attributes[i * attribute_count + k])
						shared = ~0u;
					else if (shared != ~0u && attribute_quadrics[v].w > attribute_quadrics[shared].w)
						shared = v;
			}

			// update attributes for all wedges
			unsigned int v = unsigned(i);

			do
			{
				// each wedge uses its own quadric/gradient unless values were shared across the complex
				unsigned int r = (shared == ~0u) ? v : shared;

				const Vector3& p = vertex_positions[i]; // same for all wedges
				const Quadric& A = attribute_quadrics[r];
				const QuadricGrad& G = attribute_gradients[r * attribute_count + k];

				// evaluate the gradient plane at p, normalized by the quadric weight
				float iw = A.w == 0 ? 0.f : 1.f / A.w;
				float av = (G.gx * p.x + G.gy * p.y + G.gz * p.z + G.gw) * iw;

				vertex_attributes[v * attribute_count + k] = av;

				v = wedge[v];
			} while (v != i);
		}
	}
}
  1452. static size_t remapIndexBuffer(unsigned int* indices, size_t index_count, const unsigned int* collapse_remap, const unsigned int* remap)
  1453. {
  1454. size_t write = 0;
  1455. for (size_t i = 0; i < index_count; i += 3)
  1456. {
  1457. unsigned int v0 = collapse_remap[indices[i + 0]];
  1458. unsigned int v1 = collapse_remap[indices[i + 1]];
  1459. unsigned int v2 = collapse_remap[indices[i + 2]];
  1460. // we never move the vertex twice during a single pass
  1461. assert(collapse_remap[v0] == v0);
  1462. assert(collapse_remap[v1] == v1);
  1463. assert(collapse_remap[v2] == v2);
  1464. // collapse zero area triangles even if they are not topologically degenerate
  1465. // this is required to cleanup manifold->seam collapses when a vertex is collapsed onto a seam pair
  1466. // as well as complex collapses and some other cases where cross wedge collapses are performed
  1467. unsigned int r0 = remap[v0];
  1468. unsigned int r1 = remap[v1];
  1469. unsigned int r2 = remap[v2];
  1470. if (r0 != r1 && r0 != r2 && r1 != r2)
  1471. {
  1472. indices[write + 0] = v0;
  1473. indices[write + 1] = v1;
  1474. indices[write + 2] = v2;
  1475. write += 3;
  1476. }
  1477. }
  1478. return write;
  1479. }
  1480. static void remapEdgeLoops(unsigned int* loop, size_t vertex_count, const unsigned int* collapse_remap)
  1481. {
  1482. for (size_t i = 0; i < vertex_count; ++i)
  1483. {
  1484. // note: this is a no-op for vertices that were remapped
  1485. // ideally we would clear the loop entries for those for consistency, even though they aren't going to be used
  1486. // however, the remapping process needs loop information for remapped vertices, so this would require a separate pass
  1487. if (loop[i] != ~0u)
  1488. {
  1489. unsigned int l = loop[i];
  1490. unsigned int r = collapse_remap[l];
  1491. // i == r is a special case when the seam edge is collapsed in a direction opposite to where loop goes
  1492. if (i == r)
  1493. loop[i] = (loop[l] != ~0u) ? collapse_remap[loop[l]] : ~0u;
  1494. else
  1495. loop[i] = r;
  1496. }
  1497. }
  1498. }
  1499. static unsigned int follow(unsigned int* parents, unsigned int index)
  1500. {
  1501. while (index != parents[index])
  1502. {
  1503. unsigned int parent = parents[index];
  1504. parents[index] = parents[parent];
  1505. index = parent;
  1506. }
  1507. return index;
  1508. }
// Computes connected components over triangle edges (with positionally identical vertices
// unified via remap), writes a sequential component index for every vertex into components,
// and returns the number of components.
static size_t buildComponents(unsigned int* components, size_t vertex_count, const unsigned int* indices, size_t index_count, const unsigned int* remap)
{
	// start with every vertex as its own union-find root
	for (size_t i = 0; i < vertex_count; ++i)
		components[i] = unsigned(i);

	// compute a unique (but not sequential!) index for each component via union-find
	for (size_t i = 0; i < index_count; i += 3)
	{
		static const int next[4] = {1, 2, 0, 1};

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];

			unsigned int r0 = remap[i0];
			unsigned int r1 = remap[i1];

			r0 = follow(components, r0);
			r1 = follow(components, r1);

			// merge components with larger indices into components with smaller indices
			// this guarantees that the root of the component is always the one with the smallest index
			if (r0 != r1)
				components[r0 < r1 ? r1 : r0] = r0 < r1 ? r0 : r1;
		}
	}

	// make sure each element points to the component root *before* we renumber the components
	for (size_t i = 0; i < vertex_count; ++i)
		if (remap[i] == i)
			components[i] = follow(components, unsigned(i));

	unsigned int next_component = 0;

	// renumber components using sequential indices
	// a sequential pass is sufficient because component root always has the smallest index
	// note: it is unsafe to use follow() in this pass because we're replacing component links with sequential indices inplace
	for (size_t i = 0; i < vertex_count; ++i)
	{
		if (remap[i] == i)
		{
			unsigned int root = components[i];
			assert(root <= i); // make sure we already computed the component for non-roots

			components[i] = (root == i) ? next_component++ : components[root];
		}
		else
		{
			assert(remap[i] < i); // make sure we already computed the component

			components[i] = components[remap[i]];
		}
	}

	return next_component;
}
// Computes a per-component error estimate: the squared radius of a bounding sphere centered
// at the component's average vertex position. component_errors is used as scratch space
// ([c*4+0..2] = center, [c*4+3] = weight, then radius) before results are compacted into
// component_errors[c].
static void measureComponents(float* component_errors, size_t component_count, const unsigned int* components, const Vector3* vertex_positions, size_t vertex_count)
{
	memset(component_errors, 0, component_count * 4 * sizeof(float));

	// compute approximate sphere center for each component as an average
	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int c = components[i];
		assert(components[i] < component_count);

		Vector3 v = vertex_positions[i]; // copy avoids aliasing issues

		component_errors[c * 4 + 0] += v.x;
		component_errors[c * 4 + 1] += v.y;
		component_errors[c * 4 + 2] += v.z;
		component_errors[c * 4 + 3] += 1; // weight
	}

	// complete the center computation, and reinitialize [3] as a radius
	for (size_t i = 0; i < component_count; ++i)
	{
		float w = component_errors[i * 4 + 3];
		float iw = w == 0.f ? 0.f : 1.f / w;

		component_errors[i * 4 + 0] *= iw;
		component_errors[i * 4 + 1] *= iw;
		component_errors[i * 4 + 2] *= iw;
		component_errors[i * 4 + 3] = 0; // radius
	}

	// compute squared radius for each component
	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int c = components[i];

		float dx = vertex_positions[i].x - component_errors[c * 4 + 0];
		float dy = vertex_positions[i].y - component_errors[c * 4 + 1];
		float dz = vertex_positions[i].z - component_errors[c * 4 + 2];
		float r = dx * dx + dy * dy + dz * dz;

		component_errors[c * 4 + 3] = component_errors[c * 4 + 3] < r ? r : component_errors[c * 4 + 3];
	}

	// we've used the output buffer as scratch space, so we need to move the results to proper indices
	for (size_t i = 0; i < component_count; ++i)
	{
#if TRACE >= 2
		printf("component %d: center %f %f %f, error %e\n", int(i),
		    component_errors[i * 4 + 0], component_errors[i * 4 + 1], component_errors[i * 4 + 2], sqrtf(component_errors[i * 4 + 3]));
#endif
		// note: we keep the squared error to make it match quadric error metric
		component_errors[i] = component_errors[i * 4 + 3];
	}
}
// Removes all triangles belonging to components whose error is at or below error_cutoff,
// compacting the index buffer in place. Updates nexterror with the smallest error among
// the surviving components (FLT_MAX if nothing survives) and returns the new index count.
static size_t pruneComponents(unsigned int* indices, size_t index_count, const unsigned int* components, const float* component_errors, size_t component_count, float error_cutoff, float& nexterror)
{
	(void)component_count;

	size_t write = 0;
	float min_error = FLT_MAX;

	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int v0 = indices[i + 0], v1 = indices[i + 1], v2 = indices[i + 2];

		unsigned int c = components[v0];
		assert(c == components[v1] && c == components[v2]); // all vertices of a triangle belong to one component

		if (component_errors[c] > error_cutoff)
		{
			// component survives; track the smallest surviving error for the next prune pass
			min_error = min_error > component_errors[c] ? component_errors[c] : min_error;

			indices[write + 0] = v0;
			indices[write + 1] = v1;
			indices[write + 2] = v2;
			write += 3;
		}
	}

#if TRACE
	size_t pruned_components = 0;
	for (size_t i = 0; i < component_count; ++i)
		pruned_components += (component_errors[i] >= nexterror && component_errors[i] <= error_cutoff);

	printf("pruned %d triangles in %d components (goal %e); next %e\n", int((index_count - write) / 3), int(pruned_components), sqrtf(error_cutoff), min_error < FLT_MAX ? sqrtf(min_error) : min_error * 2);
#endif

	// update next error with the smallest error of the remaining components
	nexterror = min_error;

	return write;
}
// Hash functor over vertex indices keyed by their cell id: two vertices are equal
// iff their vertex_ids entries match.
struct CellHasher
{
	const unsigned int* vertex_ids;

	size_t hash(unsigned int i) const
	{
		unsigned int h = vertex_ids[i];

		// MurmurHash2 finalizer
		h ^= h >> 13;
		h *= 0x5bd1e995;
		h ^= h >> 15;
		return h;
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		return vertex_ids[lhs] == vertex_ids[rhs];
	}
};
// Hash functor over raw id values, used for counting distinct ids.
struct IdHasher
{
	size_t hash(unsigned int id) const
	{
		unsigned int h = id;

		// MurmurHash2 finalizer
		h ^= h >> 13;
		h *= 0x5bd1e995;
		h ^= h >> 15;
		return h;
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		return lhs == rhs;
	}
};
// Hash functor over triangle slots (index i refers to indices[i*3..i*3+2]); two triangles
// are equal iff their three vertex indices match in order.
struct TriangleHasher
{
	const unsigned int* indices;

	size_t hash(unsigned int i) const
	{
		const unsigned int* tri = indices + i * 3;

		// Optimized Spatial Hashing for Collision Detection of Deformable Objects
		return (tri[0] * 73856093) ^ (tri[1] * 19349663) ^ (tri[2] * 83492791);
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		const unsigned int* lt = indices + lhs * 3;
		const unsigned int* rt = indices + rhs * 3;

		return lt[0] == rt[0] && lt[1] == rt[1] && lt[2] == rt[2];
	}
};
// Quantizes each vertex position onto a grid_size^3 lattice and packs the cell coordinates
// into a single id (10 bits per axis). Locked vertices get a unique id (bit 30 + vertex
// index) so they never merge with any other vertex.
// NOTE(review): the 10-bit packing assumes positions are pre-normalized into [0, 1] per
// axis by the caller -- confirm against the caller's rescale step.
static void computeVertexIds(unsigned int* vertex_ids, const Vector3* vertex_positions, const unsigned char* vertex_lock, size_t vertex_count, int grid_size)
{
	assert(grid_size >= 1 && grid_size <= 1024);
	float cell_scale = float(grid_size - 1);

	for (size_t i = 0; i < vertex_count; ++i)
	{
		const Vector3& v = vertex_positions[i];

		// round to the nearest grid cell on each axis
		int xi = int(v.x * cell_scale + 0.5f);
		int yi = int(v.y * cell_scale + 0.5f);
		int zi = int(v.z * cell_scale + 0.5f);

		if (vertex_lock && (vertex_lock[i] & meshopt_SimplifyVertex_Lock))
			vertex_ids[i] = (1 << 30) | unsigned(i);
		else
			vertex_ids[i] = (xi << 20) | (yi << 10) | zi;
	}
}
  1694. static size_t countTriangles(const unsigned int* vertex_ids, const unsigned int* indices, size_t index_count)
  1695. {
  1696. size_t result = 0;
  1697. for (size_t i = 0; i < index_count; i += 3)
  1698. {
  1699. unsigned int id0 = vertex_ids[indices[i + 0]];
  1700. unsigned int id1 = vertex_ids[indices[i + 1]];
  1701. unsigned int id2 = vertex_ids[indices[i + 2]];
  1702. result += (id0 != id1) & (id0 != id2) & (id1 != id2);
  1703. }
  1704. return result;
  1705. }
// Assigns a dense cell index to every vertex: vertices with equal vertex_ids share a cell.
// Uses the hash table for deduplication and returns the number of distinct cells.
static size_t fillVertexCells(unsigned int* table, size_t table_size, unsigned int* vertex_cells, const unsigned int* vertex_ids, size_t vertex_count)
{
	CellHasher hasher = {vertex_ids};

	memset(table, -1, table_size * sizeof(unsigned int));

	size_t result = 0;

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int* entry = hashLookup2(table, table_size, hasher, unsigned(i), ~0u);

		if (*entry == ~0u)
		{
			// first vertex with this id: allocate a new cell
			*entry = unsigned(i);
			vertex_cells[i] = unsigned(result++);
		}
		else
		{
			// duplicate id: reuse the cell of the first vertex encountered
			vertex_cells[i] = vertex_cells[*entry];
		}
	}

	return result;
}
// Counts the number of distinct values in vertex_ids using the hash table for deduplication.
static size_t countVertexCells(unsigned int* table, size_t table_size, const unsigned int* vertex_ids, size_t vertex_count)
{
	IdHasher hasher;

	memset(table, -1, table_size * sizeof(unsigned int));

	size_t result = 0;

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int id = vertex_ids[i];
		unsigned int* entry = hashLookup2(table, table_size, hasher, id, ~0u);

		// an empty slot means this id has not been seen before
		result += (*entry == ~0u);
		*entry = id;
	}

	return result;
}
// Accumulates triangle quadrics into per-cell quadrics. Triangles fully contained in one
// cell contribute once with triple weight; triangles spanning cells contribute with unit
// weight to each of the three cells they touch.
static void fillCellQuadrics(Quadric* cell_quadrics, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* vertex_cells)
{
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int i0 = indices[i + 0];
		unsigned int i1 = indices[i + 1];
		unsigned int i2 = indices[i + 2];

		unsigned int c0 = vertex_cells[i0];
		unsigned int c1 = vertex_cells[i1];
		unsigned int c2 = vertex_cells[i2];

		int single_cell = (c0 == c1) & (c0 == c2);

		Quadric Q;
		quadricFromTriangle(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], single_cell ? 3.f : 1.f);

		if (single_cell)
		{
			quadricAdd(cell_quadrics[c0], Q);
		}
		else
		{
			quadricAdd(cell_quadrics[c0], Q);
			quadricAdd(cell_quadrics[c1], Q);
			quadricAdd(cell_quadrics[c2], Q);
		}
	}
}
// Accumulates per-cell averages of vertex positions and colors (reservoirs).
// The first loop sums position/color with a unit weight per vertex; the second loop
// normalizes each reservoir by its accumulated weight. A zero color is substituted
// when no vertex colors are provided.
static void fillCellReservoirs(Reservoir* cell_reservoirs, size_t cell_count, const Vector3* vertex_positions, const float* vertex_colors, size_t vertex_colors_stride, size_t vertex_count, const unsigned int* vertex_cells)
{
	static const float dummy_color[] = {0.f, 0.f, 0.f};

	size_t vertex_colors_stride_float = vertex_colors_stride / sizeof(float);

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int cell = vertex_cells[i];
		const Vector3& v = vertex_positions[i];
		Reservoir& r = cell_reservoirs[cell];

		const float* color = vertex_colors ? &vertex_colors[i * vertex_colors_stride_float] : dummy_color;

		r.x += v.x;
		r.y += v.y;
		r.z += v.z;
		r.r += color[0];
		r.g += color[1];
		r.b += color[2];
		r.w += 1.f;
	}

	// normalize sums into averages; empty cells keep zeroes
	for (size_t i = 0; i < cell_count; ++i)
	{
		Reservoir& r = cell_reservoirs[i];

		float iw = r.w == 0.f ? 0.f : 1.f / r.w;

		r.x *= iw;
		r.y *= iw;
		r.z *= iw;
		r.r *= iw;
		r.g *= iw;
		r.b *= iw;
	}
}
// For each cell, selects the representative vertex with the smallest quadric error
// against the cell's accumulated quadric; writes the winner into cell_remap and its
// error into cell_errors.
static void fillCellRemap(unsigned int* cell_remap, float* cell_errors, size_t cell_count, const unsigned int* vertex_cells, const Quadric* cell_quadrics, const Vector3* vertex_positions, size_t vertex_count)
{
	memset(cell_remap, -1, cell_count * sizeof(unsigned int));

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int cell = vertex_cells[i];
		float error = quadricError(cell_quadrics[cell], vertex_positions[i]);

		// first candidate for the cell, or a strictly better one
		if (cell_remap[cell] == ~0u || cell_errors[cell] > error)
		{
			cell_remap[cell] = unsigned(i);
			cell_errors[cell] = error;
		}
	}
}
// Reservoir variant: for each cell, selects the representative vertex closest to the
// cell's average position and color, with color distance scaled by color_weight.
// A zero color is substituted when no vertex colors are provided.
static void fillCellRemap(unsigned int* cell_remap, float* cell_errors, size_t cell_count, const unsigned int* vertex_cells, const Reservoir* cell_reservoirs, const Vector3* vertex_positions, const float* vertex_colors, size_t vertex_colors_stride, float color_weight, size_t vertex_count)
{
	static const float dummy_color[] = {0.f, 0.f, 0.f};

	size_t vertex_colors_stride_float = vertex_colors_stride / sizeof(float);

	memset(cell_remap, -1, cell_count * sizeof(unsigned int));

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int cell = vertex_cells[i];
		const Vector3& v = vertex_positions[i];
		const Reservoir& r = cell_reservoirs[cell];

		const float* color = vertex_colors ? &vertex_colors[i * vertex_colors_stride_float] : dummy_color;

		// squared distance to the cell average, in position and in color space
		float pos_error = (v.x - r.x) * (v.x - r.x) + (v.y - r.y) * (v.y - r.y) + (v.z - r.z) * (v.z - r.z);
		float col_error = (color[0] - r.r) * (color[0] - r.r) + (color[1] - r.g) * (color[1] - r.g) + (color[2] - r.b) * (color[2] - r.b);

		float error = pos_error + color_weight * col_error;

		// first candidate for the cell, or a strictly better one
		if (cell_remap[cell] == ~0u || cell_errors[cell] > error)
		{
			cell_remap[cell] = unsigned(i);
			cell_errors[cell] = error;
		}
	}
}
// Emits the triangles whose three vertices fall into distinct cells, remapped to each
// cell's representative vertex, rotated cyclically (winding preserved) so the smallest
// index comes first, and deduplicated via the hash table. Returns the output index count.
static size_t filterTriangles(unsigned int* destination, unsigned int* tritable, size_t tritable_size, const unsigned int* indices, size_t index_count, const unsigned int* vertex_cells, const unsigned int* cell_remap)
{
	TriangleHasher hasher = {destination};

	memset(tritable, -1, tritable_size * sizeof(unsigned int));

	size_t result = 0;

	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int c0 = vertex_cells[indices[i + 0]];
		unsigned int c1 = vertex_cells[indices[i + 1]];
		unsigned int c2 = vertex_cells[indices[i + 2]];

		if (c0 != c1 && c0 != c2 && c1 != c2)
		{
			unsigned int a = cell_remap[c0];
			unsigned int b = cell_remap[c1];
			unsigned int c = cell_remap[c2];

			// rotate so the smallest vertex index is first; this makes duplicates canonical
			if (b < a && b < c)
			{
				unsigned int t = a;
				a = b, b = c, c = t;
			}
			else if (c < a && c < b)
			{
				unsigned int t = c;
				c = b, b = a, a = t;
			}

			// write the candidate in place so the hasher can read it, then keep it only if
			// an identical triangle has not been emitted before
			destination[result * 3 + 0] = a;
			destination[result * 3 + 1] = b;
			destination[result * 3 + 2] = c;

			unsigned int* entry = hashLookup2(tritable, tritable_size, hasher, unsigned(result), ~0u);

			if (*entry == ~0u)
				*entry = unsigned(result++);
		}
	}

	return result * 3;
}
  1865. static float interpolate(float y, float x0, float y0, float x1, float y1, float x2, float y2)
  1866. {
  1867. // three point interpolation from "revenge of interpolation search" paper
  1868. float num = (y1 - y) * (x1 - x2) * (x1 - x0) * (y2 - y0);
  1869. float den = (y2 - y) * (x1 - x2) * (y0 - y1) + (y0 - y) * (x1 - x0) * (y1 - y2);
  1870. return x1 + (den == 0.f ? 0.f : num / den);
  1871. }
  1872. } // namespace meshopt
// Note: this is only exposed for development purposes; do *not* use
// These are internal option bits accepted by meshopt_simplifyEdge (see its options assert),
// placed above the public meshopt_Simplify* flag range.
enum
{
	meshopt_SimplifyInternalSolve = 1 << 29,
	meshopt_SimplifyInternalDebug = 1 << 30
};
// Core edge-collapse simplifier; all public meshopt_simplify* entry points forward here.
// destination must hold index_count entries and may alias indices. When attribute_count is 0, the
// attribute inputs may be NULL. Returns the number of indices written; when out_result_error is
// non-NULL it receives the resulting error in linear units (relative to the mesh extent unless
// meshopt_SimplifyErrorAbsolute is set).
size_t meshopt_simplifyEdge(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, const float* vertex_attributes_data, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, const unsigned char* vertex_lock, size_t target_index_count, float target_error, unsigned int options, float* out_result_error)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(target_index_count <= index_count);
	assert(target_error >= 0);
	assert((options & ~(meshopt_SimplifyLockBorder | meshopt_SimplifySparse | meshopt_SimplifyErrorAbsolute | meshopt_SimplifyPrune | meshopt_SimplifyRegularize | meshopt_SimplifyPermissive | meshopt_SimplifyInternalSolve | meshopt_SimplifyInternalDebug)) == 0);
	assert(vertex_attributes_stride >= attribute_count * sizeof(float) && vertex_attributes_stride <= 256);
	assert(vertex_attributes_stride % sizeof(float) == 0);
	assert(attribute_count <= kMaxAttributes);
	for (size_t i = 0; i < attribute_count; ++i)
		assert(attribute_weights[i] >= 0);

	meshopt_Allocator allocator;

	// all subsequent passes operate on result in place; copy the input over unless it aliases
	unsigned int* result = destination;
	if (result != indices)
		memcpy(result, indices, index_count * sizeof(unsigned int));

	// build an index remap and update indices/vertex_count to minimize the subsequent work
	// note: as a consequence, errors will be computed relative to the subset extent
	unsigned int* sparse_remap = NULL;
	if (options & meshopt_SimplifySparse)
		sparse_remap = buildSparseRemap(result, index_count, vertex_count, &vertex_count, allocator);

	// build adjacency information
	EdgeAdjacency adjacency = {};
	prepareEdgeAdjacency(adjacency, index_count, vertex_count, allocator);
	updateEdgeAdjacency(adjacency, result, index_count, vertex_count, NULL);

	// build position remap that maps each vertex to the one with identical position
	// wedge table stores next vertex with identical position for each vertex
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	unsigned int* wedge = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, wedge, vertex_positions_data, vertex_count, vertex_positions_stride, sparse_remap, allocator);

	// classify vertices; vertex kind determines collapse rules, see kCanCollapse
	unsigned char* vertex_kind = allocator.allocate<unsigned char>(vertex_count);
	unsigned int* loop = allocator.allocate<unsigned int>(vertex_count);
	unsigned int* loopback = allocator.allocate<unsigned int>(vertex_count);
	classifyVertices(vertex_kind, loop, loopback, vertex_count, adjacency, remap, wedge, vertex_lock, sparse_remap, options);

#if TRACE
	size_t unique_positions = 0;
	for (size_t i = 0; i < vertex_count; ++i)
		unique_positions += remap[i] == i;

	printf("position remap: %d vertices => %d positions\n", int(vertex_count), int(unique_positions));

	size_t kinds[Kind_Count] = {};
	for (size_t i = 0; i < vertex_count; ++i)
		kinds[vertex_kind[i]] += remap[i] == i;

	printf("kinds: manifold %d, border %d, seam %d, complex %d, locked %d\n",
	    int(kinds[Kind_Manifold]), int(kinds[Kind_Border]), int(kinds[Kind_Seam]), int(kinds[Kind_Complex]), int(kinds[Kind_Locked]));
#endif

	// rescale positions; vertex_scale/vertex_offset record the transform so that errors and solved
	// positions can later be mapped back to input units (see finalizeVertices / error_scale below)
	Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
	float vertex_offset[3] = {};
	float vertex_scale = rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride, sparse_remap, vertex_offset);

	float* vertex_attributes = NULL;
	unsigned int attribute_remap[kMaxAttributes];

	if (attribute_count)
	{
		// remap attributes to only include ones with weight > 0 to minimize memory/compute overhead for quadrics
		size_t attributes_used = 0;
		for (size_t i = 0; i < attribute_count; ++i)
			if (attribute_weights[i] > 0)
				attribute_remap[attributes_used++] = unsigned(i);

		attribute_count = attributes_used;
		vertex_attributes = allocator.allocate<float>(vertex_count * attribute_count);
		rescaleAttributes(vertex_attributes, vertex_attributes_data, vertex_count, vertex_attributes_stride, attribute_weights, attribute_count, attribute_remap, sparse_remap);
	}

	Quadric* vertex_quadrics = allocator.allocate<Quadric>(vertex_count);
	memset(vertex_quadrics, 0, vertex_count * sizeof(Quadric));

	Quadric* attribute_quadrics = NULL;
	QuadricGrad* attribute_gradients = NULL;
	QuadricGrad* volume_gradients = NULL;

	if (attribute_count)
	{
		attribute_quadrics = allocator.allocate<Quadric>(vertex_count);
		memset(attribute_quadrics, 0, vertex_count * sizeof(Quadric));

		attribute_gradients = allocator.allocate<QuadricGrad>(vertex_count * attribute_count);
		memset(attribute_gradients, 0, vertex_count * attribute_count * sizeof(QuadricGrad));

		// volume gradients are only used by the solve step (meshopt_SimplifyInternalSolve)
		if (options & meshopt_SimplifyInternalSolve)
		{
			volume_gradients = allocator.allocate<QuadricGrad>(vertex_count);
			memset(volume_gradients, 0, vertex_count * sizeof(QuadricGrad));
		}
	}

	fillFaceQuadrics(vertex_quadrics, volume_gradients, result, index_count, vertex_positions, remap);
	fillVertexQuadrics(vertex_quadrics, vertex_positions, vertex_count, remap, options);
	fillEdgeQuadrics(vertex_quadrics, result, index_count, vertex_positions, remap, vertex_kind, loop, loopback);

	if (attribute_count)
		fillAttributeQuadrics(attribute_quadrics, attribute_gradients, result, index_count, vertex_positions, vertex_attributes, attribute_count);

	// connected-component data is only needed when pruning was requested
	unsigned int* components = NULL;
	float* component_errors = NULL;
	size_t component_count = 0;
	float component_nexterror = 0;

	if (options & meshopt_SimplifyPrune)
	{
		components = allocator.allocate<unsigned int>(vertex_count);
		component_count = buildComponents(components, vertex_count, result, index_count, remap);

		component_errors = allocator.allocate<float>(component_count * 4); // overallocate for temporary use inside measureComponents
		measureComponents(component_errors, component_count, components, vertex_positions, vertex_count);

		// component_nexterror tracks the smallest per-component error, i.e. the next pruning candidate
		component_nexterror = FLT_MAX;
		for (size_t i = 0; i < component_count; ++i)
			component_nexterror = component_nexterror > component_errors[i] ? component_errors[i] : component_nexterror;

#if TRACE
		printf("components: %d (min error %e)\n", int(component_count), sqrtf(component_nexterror));
#endif
	}

#if TRACE
	size_t pass_count = 0;
#endif

	size_t collapse_capacity = boundEdgeCollapses(adjacency, vertex_count, index_count, vertex_kind);

	Collapse* edge_collapses = allocator.allocate<Collapse>(collapse_capacity);
	unsigned int* collapse_order = allocator.allocate<unsigned int>(collapse_capacity);
	unsigned int* collapse_remap = allocator.allocate<unsigned int>(vertex_count);
	unsigned char* collapse_locked = allocator.allocate<unsigned char>(vertex_count);

	size_t result_count = index_count;
	float result_error = 0;
	float vertex_error = 0;

	// target_error input is linear; we need to adjust it to match quadricError units
	float error_scale = (options & meshopt_SimplifyErrorAbsolute) ? vertex_scale : 1.f;
	float error_limit = (target_error * target_error) / (error_scale * error_scale);

	// main simplification loop: each pass picks, ranks and performs a batch of edge collapses
	while (result_count > target_index_count)
	{
		// note: throughout the simplification process adjacency structure reflects welded topology for result-in-progress
		updateEdgeAdjacency(adjacency, result, result_count, vertex_count, remap);

		size_t edge_collapse_count = pickEdgeCollapses(edge_collapses, collapse_capacity, result, result_count, remap, vertex_kind, loop, loopback);
		assert(edge_collapse_count <= collapse_capacity);

		// no edges can be collapsed any more due to topology restrictions
		if (edge_collapse_count == 0)
			break;

#if TRACE
		printf("pass %d:%c", int(pass_count++), TRACE >= 2 ? '\n' : ' ');
#endif

		rankEdgeCollapses(edge_collapses, edge_collapse_count, vertex_positions, vertex_attributes, vertex_quadrics, attribute_quadrics, attribute_gradients, attribute_count, remap, wedge, vertex_kind, loop, loopback);

		sortEdgeCollapses(collapse_order, edge_collapses, edge_collapse_count);

		size_t triangle_collapse_goal = (result_count - target_index_count) / 3;

		for (size_t i = 0; i < vertex_count; ++i)
			collapse_remap[i] = unsigned(i);

		memset(collapse_locked, 0, vertex_count);

		size_t collapses = performEdgeCollapses(collapse_remap, collapse_locked, edge_collapses, edge_collapse_count, collapse_order, remap, wedge, vertex_kind, loop, loopback, vertex_positions, adjacency, triangle_collapse_goal, error_limit, result_error);

		// no edges can be collapsed any more due to hitting the error limit or triangle collapse limit
		if (collapses == 0)
			break;

		updateQuadrics(collapse_remap, vertex_count, vertex_quadrics, volume_gradients, attribute_quadrics, attribute_gradients, attribute_count, vertex_positions, remap, vertex_error);

		// updateQuadrics will update vertex error if we use attributes, but if we don't then result_error and vertex_error are equivalent
		vertex_error = attribute_count == 0 ? result_error : vertex_error;

		// note: we update loops following edge collapses, but after this we might still have stale loop data
		// this can happen when a triangle with a loop edge gets collapsed along a non-loop edge
		// that works since a loop that points to a vertex that is no longer connected is not affecting collapse logic
		remapEdgeLoops(loop, vertex_count, collapse_remap);
		remapEdgeLoops(loopback, vertex_count, collapse_remap);

		result_count = remapIndexBuffer(result, result_count, collapse_remap, remap);

		// opportunistically prune small components whose error is already below the current error
		if ((options & meshopt_SimplifyPrune) && result_count > target_index_count && component_nexterror <= vertex_error)
			result_count = pruneComponents(result, result_count, components, component_errors, component_count, vertex_error, component_nexterror);
	}

	// at this point, component_nexterror might be stale: component it references may have been removed through a series of edge collapses
	bool component_nextstale = true;

	// we're done with the regular simplification but we're still short of the target; try pruning more aggressively towards error_limit
	while ((options & meshopt_SimplifyPrune) && result_count > target_index_count && component_nexterror <= error_limit)
	{
#if TRACE
		printf("pass %d: cleanup; ", int(pass_count++));
#endif

		float component_cutoff = component_nexterror * 1.5f < error_limit ? component_nexterror * 1.5f : error_limit;

		// track maximum error in eligible components as we are increasing resulting error
		float component_maxerror = 0;
		for (size_t i = 0; i < component_count; ++i)
			if (component_errors[i] > component_maxerror && component_errors[i] <= component_cutoff)
				component_maxerror = component_errors[i];

		size_t new_count = pruneComponents(result, result_count, components, component_errors, component_count, component_cutoff, component_nexterror);
		if (new_count == result_count && !component_nextstale)
			break;

		component_nextstale = false; // pruneComponents guarantees next error is up to date
		result_count = new_count;

		result_error = result_error < component_maxerror ? component_maxerror : result_error;
		vertex_error = vertex_error < component_maxerror ? component_maxerror : vertex_error;
	}

#if TRACE
	printf("result: %d triangles, error: %e (pos %.3e); total %d passes\n", int(result_count / 3), sqrtf(result_error), sqrtf(vertex_error), int(pass_count));
#endif

	// if solve is requested, update input buffers destructively from internal data
	if (options & meshopt_SimplifyInternalSolve)
	{
		unsigned char* vertex_update = collapse_locked; // reuse as scratch space
		memset(vertex_update, 0, vertex_count);

		// limit quadric solve to vertices that are still used in the result
		for (size_t i = 0; i < result_count; ++i)
		{
			unsigned int v = result[i];

			// mark the vertex for finalizeVertices and root vertex for solve*
			vertex_update[remap[v]] = vertex_update[v] = 1;
		}

		// edge adjacency may be stale as we haven't updated it after last series of edge collapses
		updateEdgeAdjacency(adjacency, result, result_count, vertex_count, remap);

		solvePositions(vertex_positions, vertex_count, vertex_quadrics, volume_gradients, attribute_quadrics, attribute_gradients, attribute_count, remap, wedge, adjacency, vertex_kind, vertex_update);

		if (attribute_count)
			solveAttributes(vertex_positions, vertex_attributes, vertex_count, attribute_quadrics, attribute_gradients, attribute_count, remap, wedge, vertex_kind, vertex_update);

		// the const_cast is intentional: the solve variant owns the input buffers (see meshopt_simplifyWithUpdate)
		finalizeVertices(const_cast<float*>(vertex_positions_data), vertex_positions_stride, const_cast<float*>(vertex_attributes_data), vertex_attributes_stride, attribute_weights, attribute_count, vertex_count, vertex_positions, vertex_attributes, sparse_remap, attribute_remap, vertex_scale, vertex_offset, vertex_kind, vertex_update, vertex_lock);
	}

	// if debug visualization data is requested, fill it instead of index data; for simplicity, this doesn't work with sparsity
	if ((options & meshopt_SimplifyInternalDebug) && !sparse_remap)
	{
		assert(Kind_Count <= 8 && vertex_count < (1 << 28)); // 3 bit kind, 1 bit loop

		for (size_t i = 0; i < result_count; i += 3)
		{
			unsigned int a = result[i + 0], b = result[i + 1], c = result[i + 2];

			result[i + 0] |= (vertex_kind[a] << 28) | (unsigned(loop[a] == b || loopback[b] == a) << 31);
			result[i + 1] |= (vertex_kind[b] << 28) | (unsigned(loop[b] == c || loopback[c] == b) << 31);
			result[i + 2] |= (vertex_kind[c] << 28) | (unsigned(loop[c] == a || loopback[a] == c) << 31);
		}
	}

	// convert resulting indices back into the dense space of the larger mesh
	if (sparse_remap)
		for (size_t i = 0; i < result_count; ++i)
			result[i] = sparse_remap[result[i]];

	// result_error is quadratic; we need to remap it back to linear
	if (out_result_error)
		*out_result_error = sqrtf(result_error) * error_scale;

	return result_count;
}
  2095. size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, unsigned int options, float* out_result_error)
  2096. {
  2097. assert((options & meshopt_SimplifyInternalSolve) == 0); // use meshopt_simplifyWithUpdate instead
  2098. return meshopt_simplifyEdge(destination, indices, index_count, vertex_positions_data, vertex_count, vertex_positions_stride, NULL, 0, NULL, 0, NULL, target_index_count, target_error, options, out_result_error);
  2099. }
  2100. size_t meshopt_simplifyWithAttributes(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, const float* vertex_attributes_data, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, const unsigned char* vertex_lock, size_t target_index_count, float target_error, unsigned int options, float* out_result_error)
  2101. {
  2102. assert((options & meshopt_SimplifyInternalSolve) == 0); // use meshopt_simplifyWithUpdate instead
  2103. return meshopt_simplifyEdge(destination, indices, index_count, vertex_positions_data, vertex_count, vertex_positions_stride, vertex_attributes_data, vertex_attributes_stride, attribute_weights, attribute_count, vertex_lock, target_index_count, target_error, options, out_result_error);
  2104. }
  2105. size_t meshopt_simplifyWithUpdate(unsigned int* indices, size_t index_count, float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, float* vertex_attributes_data, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, const unsigned char* vertex_lock, size_t target_index_count, float target_error, unsigned int options, float* out_result_error)
  2106. {
  2107. return meshopt_simplifyEdge(indices, indices, index_count, vertex_positions_data, vertex_count, vertex_positions_stride, vertex_attributes_data, vertex_attributes_stride, attribute_weights, attribute_count, vertex_lock, target_index_count, target_error, options | meshopt_SimplifyInternalSolve, out_result_error);
  2108. }
// Sloppy simplifier: clusters vertices on a uniform grid and keeps one representative vertex per
// cell; unlike the edge-collapse path this does not attempt to preserve topology. The grid size is
// found with a guided interpolation/binary search so the output fits target_index_count.
// Returns the number of indices written to destination (destination may alias indices).
size_t meshopt_simplifySloppy(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, const unsigned char* vertex_lock, size_t target_index_count, float target_error, float* out_result_error)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(target_index_count <= index_count);

	// we expect to get ~2 triangles/vertex in the output
	size_t target_cell_count = target_index_count / 6;

	meshopt_Allocator allocator;

	Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
	rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride);

	// find the optimal grid size using guided binary search
#if TRACE
	printf("source: %d vertices, %d triangles\n", int(vertex_count), int(index_count / 3));
	printf("target: %d cells, %d triangles\n", int(target_cell_count), int(target_index_count / 3));
#endif

	unsigned int* vertex_ids = allocator.allocate<unsigned int>(vertex_count);

	const int kInterpolationPasses = 5;

	// invariant: # of triangles in min_grid <= target_count
	// note: min_grid is seeded at ~1/target_error (target_error clamped to [1e-3, 1], so min_grid is in [1, 1000]);
	// presumably the error of collapsing within a cell is bounded by the cell size — TODO confirm
	int min_grid = int(1.f / (target_error < 1e-3f ? 1e-3f : (target_error < 1.f ? target_error : 1.f)));
	int max_grid = 1025;
	size_t min_triangles = 0;
	size_t max_triangles = index_count / 3;

	// when we're error-limited, we compute the triangle count for the min. size; this accelerates convergence and provides the correct answer when we can't use a larger grid
	if (min_grid > 1 || vertex_lock)
	{
		computeVertexIds(vertex_ids, vertex_positions, vertex_lock, vertex_count, min_grid);
		min_triangles = countTriangles(vertex_ids, indices, index_count);
	}

	// instead of starting in the middle, let's guess as to what the answer might be! triangle count usually grows as a square of grid size...
	int next_grid_size = int(sqrtf(float(target_cell_count)) + 0.5f);

	for (int pass = 0; pass < 10 + kInterpolationPasses; ++pass)
	{
		if (min_triangles >= target_index_count / 3 || max_grid - min_grid <= 1)
			break;

		// we clamp the prediction of the grid size to make sure that the search converges
		int grid_size = next_grid_size;
		grid_size = (grid_size <= min_grid) ? min_grid + 1 : (grid_size >= max_grid ? max_grid - 1 : grid_size);

		computeVertexIds(vertex_ids, vertex_positions, vertex_lock, vertex_count, grid_size);
		size_t triangles = countTriangles(vertex_ids, indices, index_count);

#if TRACE
		printf("pass %d (%s): grid size %d, triangles %d, %s\n",
		    pass, (pass == 0) ? "guess" : (pass <= kInterpolationPasses ? "lerp" : "binary"),
		    grid_size, int(triangles),
		    (triangles <= target_index_count / 3) ? "under" : "over");
#endif

		float tip = interpolate(float(size_t(target_index_count / 3)), float(min_grid), float(min_triangles), float(grid_size), float(triangles), float(max_grid), float(max_triangles));

		// tighten whichever bound the new sample falls on
		if (triangles <= target_index_count / 3)
		{
			min_grid = grid_size;
			min_triangles = triangles;
		}
		else
		{
			max_grid = grid_size;
			max_triangles = triangles;
		}

		// we start by using interpolation search - it usually converges faster
		// however, interpolation search has a worst case of O(N) so we switch to binary search after a few iterations which converges in O(logN)
		next_grid_size = (pass < kInterpolationPasses) ? int(tip + 0.5f) : (min_grid + max_grid) / 2;
	}

	// no grid size yields any triangles under the budget; report maximal error and an empty result
	if (min_triangles == 0)
	{
		if (out_result_error)
			*out_result_error = 1.f;

		return 0;
	}

	// build vertex->cell association by mapping all vertices with the same quantized position to the same cell
	size_t table_size = hashBuckets2(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);

	unsigned int* vertex_cells = allocator.allocate<unsigned int>(vertex_count);

	computeVertexIds(vertex_ids, vertex_positions, vertex_lock, vertex_count, min_grid);
	size_t cell_count = fillVertexCells(table, table_size, vertex_cells, vertex_ids, vertex_count);

	// build a quadric for each target cell
	Quadric* cell_quadrics = allocator.allocate<Quadric>(cell_count);
	memset(cell_quadrics, 0, cell_count * sizeof(Quadric));

	fillCellQuadrics(cell_quadrics, indices, index_count, vertex_positions, vertex_cells);

	// for each target cell, find the vertex with the minimal error
	unsigned int* cell_remap = allocator.allocate<unsigned int>(cell_count);
	float* cell_errors = allocator.allocate<float>(cell_count);

	fillCellRemap(cell_remap, cell_errors, cell_count, vertex_cells, cell_quadrics, vertex_positions, vertex_count);

	// compute error as the maximum over all cell errors
	float result_error = 0.f;

	for (size_t i = 0; i < cell_count; ++i)
		result_error = result_error < cell_errors[i] ? cell_errors[i] : result_error;

	// vertex collapses often result in duplicate triangles; we need a table to filter them out
	size_t tritable_size = hashBuckets2(min_triangles);
	unsigned int* tritable = allocator.allocate<unsigned int>(tritable_size);

	// note: this is the first and last write to destination, which allows aliasing destination with indices
	size_t write = filterTriangles(destination, tritable, tritable_size, indices, index_count, vertex_cells, cell_remap);

#if TRACE
	printf("result: grid size %d, %d cells, %d triangles (%d unfiltered), error %e\n", min_grid, int(cell_count), int(write / 3), int(min_triangles), sqrtf(result_error));
#endif

	// internal errors are quadratic; report the linear error to the caller
	if (out_result_error)
		*out_result_error = sqrtf(result_error);

	return write;
}
  2207. size_t meshopt_simplifyPrune(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, float target_error)
  2208. {
  2209. using namespace meshopt;
  2210. assert(index_count % 3 == 0);
  2211. assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
  2212. assert(vertex_positions_stride % sizeof(float) == 0);
  2213. assert(target_error >= 0);
  2214. meshopt_Allocator allocator;
  2215. unsigned int* result = destination;
  2216. if (result != indices)
  2217. memcpy(result, indices, index_count * sizeof(unsigned int));
  2218. // build position remap that maps each vertex to the one with identical position
  2219. unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
  2220. buildPositionRemap(remap, NULL, vertex_positions_data, vertex_count, vertex_positions_stride, NULL, allocator);
  2221. Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
  2222. rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride, NULL);
  2223. unsigned int* components = allocator.allocate<unsigned int>(vertex_count);
  2224. size_t component_count = buildComponents(components, vertex_count, indices, index_count, remap);
  2225. float* component_errors = allocator.allocate<float>(component_count * 4); // overallocate for temporary use inside measureComponents
  2226. measureComponents(component_errors, component_count, components, vertex_positions, vertex_count);
  2227. float component_nexterror = 0;
  2228. size_t result_count = pruneComponents(result, index_count, components, component_errors, component_count, target_error * target_error, component_nexterror);
  2229. return result_count;
  2230. }
// Point cloud simplifier: clusters points on a uniform grid and keeps one representative point per
// cell, optionally taking per-point colors (weighted by color_weight) into account.
// Returns the number of selected point indices written to destination (<= target_vertex_count).
size_t meshopt_simplifyPoints(unsigned int* destination, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, const float* vertex_colors, size_t vertex_colors_stride, float color_weight, size_t target_vertex_count)
{
	using namespace meshopt;

	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(vertex_colors_stride == 0 || (vertex_colors_stride >= 12 && vertex_colors_stride <= 256));
	assert(vertex_colors_stride % sizeof(float) == 0);
	assert(vertex_colors == NULL || vertex_colors_stride != 0);
	assert(target_vertex_count <= vertex_count);

	size_t target_cell_count = target_vertex_count;

	if (target_cell_count == 0)
		return 0;

	meshopt_Allocator allocator;

	Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count);
	rescalePositions(vertex_positions, vertex_positions_data, vertex_count, vertex_positions_stride);

	// find the optimal grid size using guided binary search
#if TRACE
	printf("source: %d vertices\n", int(vertex_count));
	printf("target: %d cells\n", int(target_cell_count));
#endif

	unsigned int* vertex_ids = allocator.allocate<unsigned int>(vertex_count);

	size_t table_size = hashBuckets2(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);

	const int kInterpolationPasses = 5;

	// invariant: # of vertices in min_grid <= target_count
	int min_grid = 0;
	int max_grid = 1025;
	size_t min_vertices = 0;
	size_t max_vertices = vertex_count;

	// instead of starting in the middle, let's guess as to what the answer might be! triangle count usually grows as a square of grid size...
	int next_grid_size = int(sqrtf(float(target_cell_count)) + 0.5f);

	for (int pass = 0; pass < 10 + kInterpolationPasses; ++pass)
	{
		assert(min_vertices < target_vertex_count);
		assert(max_grid - min_grid > 1);

		// we clamp the prediction of the grid size to make sure that the search converges
		int grid_size = next_grid_size;
		grid_size = (grid_size <= min_grid) ? min_grid + 1 : (grid_size >= max_grid ? max_grid - 1 : grid_size);

		computeVertexIds(vertex_ids, vertex_positions, NULL, vertex_count, grid_size);
		size_t vertices = countVertexCells(table, table_size, vertex_ids, vertex_count);

#if TRACE
		printf("pass %d (%s): grid size %d, vertices %d, %s\n",
		    pass, (pass == 0) ? "guess" : (pass <= kInterpolationPasses ? "lerp" : "binary"),
		    grid_size, int(vertices),
		    (vertices <= target_vertex_count) ? "under" : "over");
#endif

		float tip = interpolate(float(target_vertex_count), float(min_grid), float(min_vertices), float(grid_size), float(vertices), float(max_grid), float(max_vertices));

		// tighten whichever bound the new sample falls on
		if (vertices <= target_vertex_count)
		{
			min_grid = grid_size;
			min_vertices = vertices;
		}
		else
		{
			max_grid = grid_size;
			max_vertices = vertices;
		}

		if (vertices == target_vertex_count || max_grid - min_grid <= 1)
			break;

		// we start by using interpolation search - it usually converges faster
		// however, interpolation search has a worst case of O(N) so we switch to binary search after a few iterations which converges in O(logN)
		next_grid_size = (pass < kInterpolationPasses) ? int(tip + 0.5f) : (min_grid + max_grid) / 2;
	}

	// no grid size produced a non-empty result under the budget
	if (min_vertices == 0)
		return 0;

	// build vertex->cell association by mapping all vertices with the same quantized position to the same cell
	unsigned int* vertex_cells = allocator.allocate<unsigned int>(vertex_count);

	computeVertexIds(vertex_ids, vertex_positions, NULL, vertex_count, min_grid);
	size_t cell_count = fillVertexCells(table, table_size, vertex_cells, vertex_ids, vertex_count);

	// accumulate points into a reservoir for each target cell
	Reservoir* cell_reservoirs = allocator.allocate<Reservoir>(cell_count);
	memset(cell_reservoirs, 0, cell_count * sizeof(Reservoir));

	fillCellReservoirs(cell_reservoirs, cell_count, vertex_positions, vertex_colors, vertex_colors_stride, vertex_count, vertex_cells);

	// for each target cell, find the vertex with the minimal error
	unsigned int* cell_remap = allocator.allocate<unsigned int>(cell_count);
	float* cell_errors = allocator.allocate<float>(cell_count);

	// we scale the color weight to bring it to the same scale as position so that error addition makes sense
	float color_weight_scaled = color_weight * (min_grid == 1 ? 1.f : 1.f / (min_grid - 1));

	fillCellRemap(cell_remap, cell_errors, cell_count, vertex_cells, cell_reservoirs, vertex_positions, vertex_colors, vertex_colors_stride, color_weight_scaled * color_weight_scaled, vertex_count);

	// copy results to the output
	assert(cell_count <= target_vertex_count);
	memcpy(destination, cell_remap, sizeof(unsigned int) * cell_count);

#if TRACE
	// compute error as the maximum over all cell errors
	float result_error = 0.f;

	for (size_t i = 0; i < cell_count; ++i)
		result_error = result_error < cell_errors[i] ? cell_errors[i] : result_error;

	printf("result: %d cells, %e error\n", int(cell_count), sqrtf(result_error));
#endif

	return cell_count;
}
  2322. float meshopt_simplifyScale(const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
  2323. {
  2324. using namespace meshopt;
  2325. assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
  2326. assert(vertex_positions_stride % sizeof(float) == 0);
  2327. float extent = rescalePositions(NULL, vertex_positions, vertex_count, vertex_positions_stride);
  2328. return extent;
  2329. }