/**
 * meshoptimizer - version 0.15
 *
 * Copyright (C) 2016-2020, by Arseny Kapoulkine ([email protected])
 * Report bugs and download new versions at https://github.com/zeux/meshoptimizer
 *
 * This library is distributed under the MIT License. See notice at the end of this file.
 */
#pragma once

#include <assert.h>
#include <stddef.h>

/* Version macro; major * 1000 + minor * 10 + patch */
#define MESHOPTIMIZER_VERSION 150 /* 0.15 */

/* If no API is defined, assume default */
#ifndef MESHOPTIMIZER_API
#define MESHOPTIMIZER_API
#endif

/* Experimental APIs have unstable interface and might have implementation that's not fully tested or optimized */
#define MESHOPTIMIZER_EXPERIMENTAL MESHOPTIMIZER_API

/* C interface */
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Vertex attribute stream, similar to glVertexPointer
 * Each element takes size bytes, with stride controlling the spacing between successive elements.
 */
struct meshopt_Stream
{
    const void* data;
    size_t size;
    size_t stride;
};

/**
 * Generates a vertex remap table from the vertex buffer and an optional index buffer and returns number of unique vertices
 * As a result, all vertices that are binary equivalent map to the same (new) location, with no gaps in the resulting sequence.
 * Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer/meshopt_remapIndexBuffer.
 * Note that binary equivalence considers all vertex_size bytes, including padding which should be zero-initialized.
 *
 * destination must contain enough space for the resulting remap table (vertex_count elements)
 * indices can be NULL if the input is unindexed
 */
MESHOPTIMIZER_API size_t meshopt_generateVertexRemap(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size);

/**
 * Generates a vertex remap table from multiple vertex streams and an optional index buffer and returns number of unique vertices
 * As a result, all vertices that are binary equivalent map to the same (new) location, with no gaps in the resulting sequence.
 * Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer/meshopt_remapIndexBuffer.
 * To remap vertex buffers, you will need to call meshopt_remapVertexBuffer for each vertex stream.
 * Note that binary equivalence considers all size bytes in each stream, including padding which should be zero-initialized.
 *
 * destination must contain enough space for the resulting remap table (vertex_count elements)
 * indices can be NULL if the input is unindexed
 */
MESHOPTIMIZER_API size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count);

/**
 * Generates vertex buffer from the source vertex buffer and remap table generated by meshopt_generateVertexRemap
 *
 * destination must contain enough space for the resulting vertex buffer (unique_vertex_count elements, returned by meshopt_generateVertexRemap)
 * vertex_count should be the initial vertex count and not the value returned by meshopt_generateVertexRemap
 */
MESHOPTIMIZER_API void meshopt_remapVertexBuffer(void* destination, const void* vertices, size_t vertex_count, size_t vertex_size, const unsigned int* remap);

/**
 * Generate index buffer from the source index buffer and remap table generated by meshopt_generateVertexRemap
 *
 * destination must contain enough space for the resulting index buffer (index_count elements)
 * indices can be NULL if the input is unindexed
 */
MESHOPTIMIZER_API void meshopt_remapIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const unsigned int* remap);
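
/*
 * Illustrative usage sketch (not part of the library): indexing an unindexed triangle mesh.
 * The Vertex struct, the unindexed_vertices array and the counts below are assumptions for the example.
 *
 *   std::vector<unsigned int> remap(vertex_count);
 *   size_t unique_vertex_count = meshopt_generateVertexRemap(&remap[0], NULL, vertex_count, unindexed_vertices, vertex_count, sizeof(Vertex));
 *
 *   std::vector<Vertex> vertices(unique_vertex_count);
 *   std::vector<unsigned int> indices(vertex_count);
 *   meshopt_remapVertexBuffer(&vertices[0], unindexed_vertices, vertex_count, sizeof(Vertex), &remap[0]);
 *   meshopt_remapIndexBuffer(&indices[0], NULL, vertex_count, &remap[0]);
 */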

/**
 * Generate index buffer that can be used for more efficient rendering when only a subset of the vertex attributes is necessary
 * All vertices that are binary equivalent (wrt first vertex_size bytes) map to the first vertex in the original vertex buffer.
 * This makes it possible to use the index buffer for Z pre-pass or shadowmap rendering, while using the original index buffer for regular rendering.
 * Note that binary equivalence considers all vertex_size bytes, including padding which should be zero-initialized.
 *
 * destination must contain enough space for the resulting index buffer (index_count elements)
 */
MESHOPTIMIZER_API void meshopt_generateShadowIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride);

/**
 * Generate index buffer that can be used for more efficient rendering when only a subset of the vertex attributes is necessary
 * All vertices that are binary equivalent (wrt specified streams) map to the first vertex in the original vertex buffer.
 * This makes it possible to use the index buffer for Z pre-pass or shadowmap rendering, while using the original index buffer for regular rendering.
 * Note that binary equivalence considers all size bytes in each stream, including padding which should be zero-initialized.
 *
 * destination must contain enough space for the resulting index buffer (index_count elements)
 */
MESHOPTIMIZER_API void meshopt_generateShadowIndexBufferMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count);

/**
 * Vertex transform cache optimizer
 * Reorders indices to reduce the number of GPU vertex shader invocations
 * If the index buffer contains multiple ranges for multiple draw calls, this function needs to be called on each range individually.
 *
 * destination must contain enough space for the resulting index buffer (index_count elements)
 */
MESHOPTIMIZER_API void meshopt_optimizeVertexCache(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count);

/**
 * Vertex transform cache optimizer for strip-like caches
 * Produces inferior results to meshopt_optimizeVertexCache from the GPU vertex cache perspective
 * However, the resulting index order is more optimal if the goal is to reduce the triangle strip length or improve compression efficiency
 *
 * destination must contain enough space for the resulting index buffer (index_count elements)
 */
MESHOPTIMIZER_API void meshopt_optimizeVertexCacheStrip(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count);

/**
 * Vertex transform cache optimizer for FIFO caches
 * Reorders indices to reduce the number of GPU vertex shader invocations
 * Generally takes ~3x less time to optimize meshes but produces inferior results compared to meshopt_optimizeVertexCache
 * If the index buffer contains multiple ranges for multiple draw calls, this function needs to be called on each range individually.
 *
 * destination must contain enough space for the resulting index buffer (index_count elements)
 * cache_size should be less than the actual GPU cache size to avoid cache thrashing
 */
MESHOPTIMIZER_API void meshopt_optimizeVertexCacheFifo(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, unsigned int cache_size);

/**
 * Overdraw optimizer
 * Reorders indices to reduce the number of GPU vertex shader invocations and the pixel overdraw
 * If the index buffer contains multiple ranges for multiple draw calls, this function needs to be called on each range individually.
 *
 * destination must contain enough space for the resulting index buffer (index_count elements)
 * indices must contain index data that is the result of meshopt_optimizeVertexCache (*not* the original mesh indices!)
 * vertex_positions should have float3 position in the first 12 bytes of each vertex - similar to glVertexPointer
 * threshold indicates how much the overdraw optimizer can degrade vertex cache efficiency (1.05 = up to 5%) to reduce overdraw more efficiently
 */
MESHOPTIMIZER_API void meshopt_optimizeOverdraw(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, float threshold);
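
/*
 * Illustrative usage sketch (not part of the library): vertex cache optimization followed by overdraw
 * optimization, operating in-place on the index buffer. The vertices/indices containers and the Vertex
 * struct with a leading float px member are assumptions for the example.
 *
 *   meshopt_optimizeVertexCache(&indices[0], &indices[0], indices.size(), vertices.size());
 *   meshopt_optimizeOverdraw(&indices[0], &indices[0], indices.size(), &vertices[0].px, vertices.size(), sizeof(Vertex), 1.05f);
 */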

/**
 * Vertex fetch cache optimizer
 * Reorders vertices and changes indices to reduce the amount of GPU memory fetches during vertex processing
 * Returns the number of unique vertices, which is the same as input vertex count unless some vertices are unused
 * This function works for a single vertex stream; for multiple vertex streams, use meshopt_optimizeVertexFetchRemap + meshopt_remapVertexBuffer for each stream.
 *
 * destination must contain enough space for the resulting vertex buffer (vertex_count elements)
 * indices is used both as an input and as an output index buffer
 */
MESHOPTIMIZER_API size_t meshopt_optimizeVertexFetch(void* destination, unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size);
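
/*
 * Illustrative usage sketch (not part of the library): vertex fetch optimization as the last reordering
 * step, rewriting the vertex buffer in-place and updating the indices. The containers are assumptions.
 *
 *   meshopt_optimizeVertexFetch(&vertices[0], &indices[0], indices.size(), &vertices[0], vertices.size(), sizeof(Vertex));
 */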

/**
 * Vertex fetch cache optimizer
 * Generates vertex remap to reduce the amount of GPU memory fetches during vertex processing
 * Returns the number of unique vertices, which is the same as input vertex count unless some vertices are unused
 * The resulting remap table should be used to reorder vertex/index buffers using meshopt_remapVertexBuffer/meshopt_remapIndexBuffer
 *
 * destination must contain enough space for the resulting remap table (vertex_count elements)
 */
MESHOPTIMIZER_API size_t meshopt_optimizeVertexFetchRemap(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count);

/**
 * Index buffer encoder
 * Encodes index data into an array of bytes that is generally much smaller (<1.5 bytes/triangle) and compresses better (<1 byte/triangle) compared to original.
 * Input index buffer must represent a triangle list.
 * Returns encoded data size on success, 0 on error; the only error condition is if buffer doesn't have enough space
 * For maximum efficiency the index buffer being encoded has to be optimized for vertex cache and vertex fetch first.
 *
 * buffer must contain enough space for the encoded index buffer (use meshopt_encodeIndexBufferBound to compute worst case size)
 */
MESHOPTIMIZER_API size_t meshopt_encodeIndexBuffer(unsigned char* buffer, size_t buffer_size, const unsigned int* indices, size_t index_count);
MESHOPTIMIZER_API size_t meshopt_encodeIndexBufferBound(size_t index_count, size_t vertex_count);

/**
 * Experimental: Set index encoder format version
 * version must specify the data format version to encode; valid values are 0 (decodable by all library versions) and 1 (decodable by 0.14+)
 */
MESHOPTIMIZER_EXPERIMENTAL void meshopt_encodeIndexVersion(int version);

/**
 * Index buffer decoder
 * Decodes index data from an array of bytes generated by meshopt_encodeIndexBuffer
 * Returns 0 if decoding was successful, and an error code otherwise
 * The decoder is safe to use for untrusted input, but it may produce garbage data (e.g. out of range indices).
 *
 * destination must contain enough space for the resulting index buffer (index_count elements)
 */
MESHOPTIMIZER_API int meshopt_decodeIndexBuffer(void* destination, size_t index_count, size_t index_size, const unsigned char* buffer, size_t buffer_size);
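
/*
 * Illustrative usage sketch (not part of the library): round-tripping an index buffer through the codec.
 * The indices container and vertex_count value are assumptions for the example.
 *
 *   std::vector<unsigned char> ibuf(meshopt_encodeIndexBufferBound(indices.size(), vertex_count));
 *   ibuf.resize(meshopt_encodeIndexBuffer(&ibuf[0], ibuf.size(), &indices[0], indices.size()));
 *
 *   std::vector<unsigned int> decoded(indices.size());
 *   int res = meshopt_decodeIndexBuffer(&decoded[0], decoded.size(), sizeof(unsigned int), &ibuf[0], ibuf.size());
 *   assert(res == 0);
 */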

/**
 * Experimental: Index sequence encoder
 * Encodes index sequence into an array of bytes that is generally smaller and compresses better compared to original.
 * Input index sequence can represent arbitrary topology; for triangle lists meshopt_encodeIndexBuffer is likely to be better.
 * Returns encoded data size on success, 0 on error; the only error condition is if buffer doesn't have enough space
 *
 * buffer must contain enough space for the encoded index sequence (use meshopt_encodeIndexSequenceBound to compute worst case size)
 */
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_encodeIndexSequence(unsigned char* buffer, size_t buffer_size, const unsigned int* indices, size_t index_count);
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_encodeIndexSequenceBound(size_t index_count, size_t vertex_count);

/**
 * Index sequence decoder
 * Decodes index data from an array of bytes generated by meshopt_encodeIndexSequence
 * Returns 0 if decoding was successful, and an error code otherwise
 * The decoder is safe to use for untrusted input, but it may produce garbage data (e.g. out of range indices).
 *
 * destination must contain enough space for the resulting index sequence (index_count elements)
 */
MESHOPTIMIZER_EXPERIMENTAL int meshopt_decodeIndexSequence(void* destination, size_t index_count, size_t index_size, const unsigned char* buffer, size_t buffer_size);

/**
 * Vertex buffer encoder
 * Encodes vertex data into an array of bytes that is generally smaller and compresses better compared to original.
 * Returns encoded data size on success, 0 on error; the only error condition is if buffer doesn't have enough space
 * This function works for a single vertex stream; for multiple vertex streams, call meshopt_encodeVertexBuffer for each stream.
 * Note that all vertex_size bytes of each vertex are encoded verbatim, including padding which should be zero-initialized.
 *
 * buffer must contain enough space for the encoded vertex buffer (use meshopt_encodeVertexBufferBound to compute worst case size)
 */
MESHOPTIMIZER_API size_t meshopt_encodeVertexBuffer(unsigned char* buffer, size_t buffer_size, const void* vertices, size_t vertex_count, size_t vertex_size);
MESHOPTIMIZER_API size_t meshopt_encodeVertexBufferBound(size_t vertex_count, size_t vertex_size);

/**
 * Experimental: Set vertex encoder format version
 * version must specify the data format version to encode; valid values are 0 (decodable by all library versions)
 */
MESHOPTIMIZER_EXPERIMENTAL void meshopt_encodeVertexVersion(int version);

/**
 * Vertex buffer decoder
 * Decodes vertex data from an array of bytes generated by meshopt_encodeVertexBuffer
 * Returns 0 if decoding was successful, and an error code otherwise
 * The decoder is safe to use for untrusted input, but it may produce garbage data.
 *
 * destination must contain enough space for the resulting vertex buffer (vertex_count * vertex_size bytes)
 */
MESHOPTIMIZER_API int meshopt_decodeVertexBuffer(void* destination, size_t vertex_count, size_t vertex_size, const unsigned char* buffer, size_t buffer_size);
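
/*
 * Illustrative usage sketch (not part of the library): round-tripping a vertex buffer through the codec.
 * The vertices container and Vertex struct are assumptions for the example.
 *
 *   std::vector<unsigned char> vbuf(meshopt_encodeVertexBufferBound(vertices.size(), sizeof(Vertex)));
 *   vbuf.resize(meshopt_encodeVertexBuffer(&vbuf[0], vbuf.size(), &vertices[0], vertices.size(), sizeof(Vertex)));
 *
 *   std::vector<Vertex> decoded(vertices.size());
 *   int res = meshopt_decodeVertexBuffer(&decoded[0], decoded.size(), sizeof(Vertex), &vbuf[0], vbuf.size());
 *   assert(res == 0);
 */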

/**
 * Vertex buffer filters
 * These functions can be used to filter output of meshopt_decodeVertexBuffer in-place.
 *
 * meshopt_decodeFilterOct decodes octahedral encoding of a unit vector with K-bit (K <= 16) signed X/Y as an input; Z must store 1.0f.
 * Each component is stored as an 8-bit or 16-bit normalized integer; stride must be equal to 4 or 8. W is preserved as is.
 *
 * meshopt_decodeFilterQuat decodes 3-component quaternion encoding with K-bit (4 <= K <= 16) component encoding and a 2-bit component index indicating which component to reconstruct.
 * Each component is stored as a 16-bit integer; stride must be equal to 8.
 *
 * meshopt_decodeFilterExp decodes exponential encoding of floating-point data with 8-bit exponent and 24-bit integer mantissa as 2^E*M.
 * Each 32-bit component is decoded in isolation; stride must be divisible by 4.
 */
MESHOPTIMIZER_EXPERIMENTAL void meshopt_decodeFilterOct(void* buffer, size_t vertex_count, size_t vertex_size);
MESHOPTIMIZER_EXPERIMENTAL void meshopt_decodeFilterQuat(void* buffer, size_t vertex_count, size_t vertex_size);
MESHOPTIMIZER_EXPERIMENTAL void meshopt_decodeFilterExp(void* buffer, size_t vertex_count, size_t vertex_size);

/**
 * Experimental: Mesh simplifier
 * Reduces the number of triangles in the mesh, attempting to preserve mesh appearance as much as possible
 * The algorithm tries to preserve mesh topology and can stop short of the target goal based on topology constraints or target error.
 * If not all attributes from the input mesh are required, it's recommended to reindex the mesh using meshopt_generateShadowIndexBuffer prior to simplification.
 * Returns the number of indices after simplification, with destination containing new index data
 * The resulting index buffer references vertices from the original vertex buffer.
 * If the original vertex data isn't required, creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended.
 *
 * destination must contain enough space for the target index buffer, worst case is index_count elements (*not* target_index_count)!
 * vertex_positions should have float3 position in the first 12 bytes of each vertex - similar to glVertexPointer
 * target_error represents the error relative to mesh extents that can be tolerated, e.g. 0.01 = 1% deformation
 * result_error can be NULL; when it's not NULL, it will contain the resulting (relative) error after simplification
 */
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* result_error);
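
/*
 * Illustrative usage sketch (not part of the library): simplifying a mesh to roughly half its triangles.
 * The containers, Vertex struct and threshold value are assumptions for the example.
 *
 *   float threshold = 0.5f;
 *   size_t target_index_count = size_t(indices.size() * threshold / 3) * 3;
 *   float target_error = 0.01f;
 *   float result_error = 0.f;
 *
 *   std::vector<unsigned int> lod(indices.size());
 *   lod.resize(meshopt_simplify(&lod[0], &indices[0], indices.size(), &vertices[0].px, vertices.size(), sizeof(Vertex), target_index_count, target_error, &result_error));
 */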

/**
 * Experimental: Mesh simplifier (sloppy)
 * Reduces the number of triangles in the mesh, sacrificing mesh appearance for simplification performance
 * The algorithm doesn't preserve mesh topology but can stop short of the target goal based on target error.
 * Returns the number of indices after simplification, with destination containing new index data
 * The resulting index buffer references vertices from the original vertex buffer.
 * If the original vertex data isn't required, creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended.
 *
 * destination must contain enough space for the target index buffer, worst case is index_count elements (*not* target_index_count)!
 * vertex_positions should have float3 position in the first 12 bytes of each vertex - similar to glVertexPointer
 * target_error represents the error relative to mesh extents that can be tolerated, e.g. 0.01 = 1% deformation
 * result_error can be NULL; when it's not NULL, it will contain the resulting (relative) error after simplification
 */
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_simplifySloppy(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* result_error);

/**
 * Experimental: Point cloud simplifier
 * Reduces the number of points in the cloud to reach the given target
 * Returns the number of points after simplification, with destination containing new index data
 * The resulting index buffer references vertices from the original vertex buffer.
 * If the original vertex data isn't required, creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended.
 *
 * destination must contain enough space for the target index buffer (target_vertex_count elements)
 * vertex_positions should have float3 position in the first 12 bytes of each vertex - similar to glVertexPointer
 */
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_simplifyPoints(unsigned int* destination, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_vertex_count);

/**
 * Experimental: Returns the error scaling factor used by the simplifier to convert between absolute and relative extents
 *
 * Absolute error must be *divided* by the scaling factor before passing it to meshopt_simplify as target_error
 * Relative error returned by meshopt_simplify via result_error must be *multiplied* by the scaling factor to get absolute error.
 */
MESHOPTIMIZER_EXPERIMENTAL float meshopt_simplifyScale(const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
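
/*
 * Illustrative usage sketch (not part of the library): converting an absolute error budget into the
 * relative target_error expected by meshopt_simplify. The names and values below are assumptions.
 *
 *   float scale = meshopt_simplifyScale(&vertices[0].px, vertices.size(), sizeof(Vertex));
 *   float absolute_error = 0.1f;                 // in the same units as the vertex positions
 *   float target_error = absolute_error / scale; // relative error passed to meshopt_simplify
 *   // after simplification, absolute error = result_error * scale
 */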

/**
 * Mesh stripifier
 * Converts a previously vertex cache optimized triangle list to triangle strip, stitching strips using restart index or degenerate triangles
 * Returns the number of indices in the resulting strip, with destination containing new index data
 * For maximum efficiency the index buffer being converted has to be optimized for vertex cache first.
 * Using restart indices can result in ~10% smaller index buffers, but on some GPUs restart indices may result in decreased performance.
 *
 * destination must contain enough space for the target index buffer, worst case can be computed with meshopt_stripifyBound
 * restart_index should be 0xffff or 0xffffffff depending on index size, or 0 to use degenerate triangles
 */
MESHOPTIMIZER_API size_t meshopt_stripify(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, unsigned int restart_index);
MESHOPTIMIZER_API size_t meshopt_stripifyBound(size_t index_count);
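
/*
 * Illustrative usage sketch (not part of the library): converting an optimized triangle list into a strip
 * stitched with a restart index. The indices container and vertex_count are assumptions for the example.
 *
 *   std::vector<unsigned int> strip(meshopt_stripifyBound(indices.size()));
 *   unsigned int restart_index = ~0u;
 *   strip.resize(meshopt_stripify(&strip[0], &indices[0], indices.size(), vertex_count, restart_index));
 */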

/**
 * Mesh unstripifier
 * Converts a triangle strip to a triangle list
 * Returns the number of indices in the resulting list, with destination containing new index data
 *
 * destination must contain enough space for the target index buffer, worst case can be computed with meshopt_unstripifyBound
 */
MESHOPTIMIZER_API size_t meshopt_unstripify(unsigned int* destination, const unsigned int* indices, size_t index_count, unsigned int restart_index);
MESHOPTIMIZER_API size_t meshopt_unstripifyBound(size_t index_count);

struct meshopt_VertexCacheStatistics
{
    unsigned int vertices_transformed;
    unsigned int warps_executed;
    float acmr; /* transformed vertices / triangle count; best case 0.5, worst case 3.0, optimum depends on topology */
    float atvr; /* transformed vertices / vertex count; best case 1.0, worst case 6.0, optimum is 1.0 (each vertex is transformed once) */
};

/**
 * Vertex transform cache analyzer
 * Returns cache hit statistics using a simplified FIFO model
 * Results may not match actual GPU performance
 */
MESHOPTIMIZER_API struct meshopt_VertexCacheStatistics meshopt_analyzeVertexCache(const unsigned int* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int primgroup_size);

struct meshopt_OverdrawStatistics
{
    unsigned int pixels_covered;
    unsigned int pixels_shaded;
    float overdraw; /* shaded pixels / covered pixels; best case 1.0 */
};

/**
 * Overdraw analyzer
 * Returns overdraw statistics using a software rasterizer
 * Results may not match actual GPU performance
 *
 * vertex_positions should have float3 position in the first 12 bytes of each vertex - similar to glVertexPointer
 */
MESHOPTIMIZER_API struct meshopt_OverdrawStatistics meshopt_analyzeOverdraw(const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);

struct meshopt_VertexFetchStatistics
{
    unsigned int bytes_fetched;
    float overfetch; /* fetched bytes / vertex buffer size; best case 1.0 (each byte is fetched once) */
};

/**
 * Vertex fetch cache analyzer
 * Returns cache hit statistics using a simplified direct mapped model
 * Results may not match actual GPU performance
 */
MESHOPTIMIZER_API struct meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const unsigned int* indices, size_t index_count, size_t vertex_count, size_t vertex_size);

struct meshopt_Meshlet
{
    unsigned int vertices[64];
    unsigned char indices[126][3];
    unsigned char triangle_count;
    unsigned char vertex_count;
};

/**
 * Experimental: Meshlet builder
 * Splits the mesh into a set of meshlets where each meshlet has a micro index buffer indexing into meshlet vertices that refer to the original vertex buffer
 * The resulting data can be used to render meshes using NVidia programmable mesh shading pipeline, or in other cluster-based renderers.
 * For maximum efficiency the index buffer being converted has to be optimized for vertex cache first.
 *
 * destination must contain enough space for all meshlets, worst case size can be computed with meshopt_buildMeshletsBound
 * max_vertices and max_triangles can't exceed limits statically declared in meshopt_Meshlet (max_vertices <= 64, max_triangles <= 126)
 */
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_buildMeshlets(struct meshopt_Meshlet* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, size_t max_vertices, size_t max_triangles);
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_buildMeshletsBound(size_t index_count, size_t max_vertices, size_t max_triangles);
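
/*
 * Illustrative usage sketch (not part of the library): building meshlets from a cache-optimized index
 * buffer. The indices container and vertex_count are assumptions for the example.
 *
 *   const size_t max_vertices = 64;
 *   const size_t max_triangles = 126;
 *
 *   std::vector<meshopt_Meshlet> meshlets(meshopt_buildMeshletsBound(indices.size(), max_vertices, max_triangles));
 *   meshlets.resize(meshopt_buildMeshlets(&meshlets[0], &indices[0], indices.size(), vertex_count, max_vertices, max_triangles));
 */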

struct meshopt_Bounds
{
    /* bounding sphere, useful for frustum and occlusion culling */
    float center[3];
    float radius;

    /* normal cone, useful for backface culling */
    float cone_apex[3];
    float cone_axis[3];
    float cone_cutoff; /* = cos(angle/2) */

    /* normal cone axis and cutoff, stored in 8-bit SNORM format; decode using x/127.0 */
    signed char cone_axis_s8[3];
    signed char cone_cutoff_s8;
};

/**
 * Experimental: Cluster bounds generator
 * Creates bounding volumes that can be used for frustum, backface and occlusion culling.
 *
 * For backface culling with orthographic projection, use the following formula to reject backfacing clusters:
 *   dot(view, cone_axis) >= cone_cutoff
 *
 * For perspective projection, you can use the formula that needs cone apex in addition to axis & cutoff:
 *   dot(normalize(cone_apex - camera_position), cone_axis) >= cone_cutoff
 *
 * Alternatively, you can use the formula that doesn't need cone apex and uses bounding sphere instead:
 *   dot(normalize(center - camera_position), cone_axis) >= cone_cutoff + radius / length(center - camera_position)
 * or an equivalent formula that doesn't have a singularity at center = camera_position:
 *   dot(center - camera_position, cone_axis) >= cone_cutoff * length(center - camera_position) + radius
 *
 * The formula that uses the apex is slightly more accurate but needs the apex; if you are already using bounding sphere
 * to do frustum/occlusion culling, the formula that doesn't use the apex may be preferable.
 *
 * vertex_positions should have float3 position in the first 12 bytes of each vertex - similar to glVertexPointer
 * index_count should be less than or equal to 256*3 (the function assumes clusters of limited size)
 */
MESHOPTIMIZER_EXPERIMENTAL struct meshopt_Bounds meshopt_computeClusterBounds(const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
MESHOPTIMIZER_EXPERIMENTAL struct meshopt_Bounds meshopt_computeMeshletBounds(const struct meshopt_Meshlet* meshlet, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
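
/*
 * Illustrative sketch (not part of the library): scalar version of the apex-free, singularity-free backface
 * cone test described above. The meshlets/vertices containers and camera_position array are assumptions;
 * sqrtf comes from <math.h>.
 *
 *   meshopt_Bounds bounds = meshopt_computeMeshletBounds(&meshlets[0], &vertices[0].px, vertices.size(), sizeof(Vertex));
 *
 *   float dx = bounds.center[0] - camera_position[0];
 *   float dy = bounds.center[1] - camera_position[1];
 *   float dz = bounds.center[2] - camera_position[2];
 *   float d = sqrtf(dx * dx + dy * dy + dz * dz);
 *   float dp = dx * bounds.cone_axis[0] + dy * bounds.cone_axis[1] + dz * bounds.cone_axis[2];
 *
 *   bool backfacing = dp >= bounds.cone_cutoff * d + bounds.radius; // cluster can be culled when true
 */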

/**
 * Experimental: Spatial sorter
 * Generates a remap table that can be used to reorder points for spatial locality.
 * Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer.
 *
 * destination must contain enough space for the resulting remap table (vertex_count elements)
 */
MESHOPTIMIZER_EXPERIMENTAL void meshopt_spatialSortRemap(unsigned int* destination, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);

/**
 * Experimental: Spatial sorter
 * Reorders triangles for spatial locality, and generates a new index buffer. The resulting index buffer can be used with other functions like meshopt_optimizeVertexCache.
 *
 * destination must contain enough space for the resulting index buffer (index_count elements)
 * vertex_positions should have float3 position in the first 12 bytes of each vertex - similar to glVertexPointer
 */
MESHOPTIMIZER_EXPERIMENTAL void meshopt_spatialSortTriangles(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);

/**
 * Set allocation callbacks
 * These callbacks will be used instead of the default operator new/operator delete for all temporary allocations in the library.
 * Note that all algorithms only allocate memory for temporary use.
 * allocate/deallocate are always called in a stack-like order - last pointer to be allocated is deallocated first.
 */
MESHOPTIMIZER_API void meshopt_setAllocator(void* (*allocate)(size_t), void (*deallocate)(void*));
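
/*
 * Illustrative usage sketch (not part of the library): routing temporary allocations through custom
 * callbacks; here the C allocator from <stdlib.h> is used, but any pair with matching signatures works.
 *
 *   meshopt_setAllocator(malloc, free);
 */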

#ifdef __cplusplus
} /* extern "C" */
#endif

/* Quantization into commonly supported data formats */
#ifdef __cplusplus
/**
 * Quantize a float in [0..1] range into an N-bit fixed point unorm value
 * Assumes reconstruction function (q / (2^N-1)), which is the case for fixed-function normalized fixed point conversion
 * Maximum reconstruction error: 1/2^(N+1)
 */
inline int meshopt_quantizeUnorm(float v, int N);

/**
 * Quantize a float in [-1..1] range into an N-bit fixed point snorm value
 * Assumes reconstruction function (q / (2^(N-1)-1)), which is the case for fixed-function normalized fixed point conversion (except early OpenGL versions)
 * Maximum reconstruction error: 1/2^N
 */
inline int meshopt_quantizeSnorm(float v, int N);

/**
 * Quantize a float into half-precision floating point value
 * Generates +-inf for overflow, preserves NaN, flushes denormals to zero, rounds to nearest
 * Representable magnitude range: [6e-5; 65504]
 * Maximum relative reconstruction error: 5e-4
 */
inline unsigned short meshopt_quantizeHalf(float v);

/**
 * Quantize a float into a floating point value with a limited number of significant mantissa bits
 * Generates +-inf for overflow, preserves NaN, flushes denormals to zero, rounds to nearest
 * Assumes N is in a valid mantissa precision range, which is 1..23
 */
inline float meshopt_quantizeFloat(float v, int N);
#endif
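
/*
 * Illustrative usage sketch (not part of the library): quantizing vertex attributes for a compact layout.
 * The v.px / v.nx / v.tx fields and the PackedVertex layout are assumptions for the example.
 *
 *   PackedVertex p;
 *   p.px = meshopt_quantizeHalf(v.px);                       // position component as a 16-bit half float
 *   p.nx = (char)meshopt_quantizeSnorm(v.nx, 8);             // normal component as an 8-bit snorm
 *   p.tx = (unsigned short)meshopt_quantizeUnorm(v.tx, 16);  // texcoord in [0..1] as a 16-bit unorm
 */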

/**
 * C++ template interface
 *
 * These functions mirror the C interface the library provides, adding template-based overloads so that
 * the caller can use an arbitrary type for the index data, both for input and output.
 * When the supplied type is the same size as that of unsigned int, the wrappers are zero-cost; when it's not,
 * the wrappers end up allocating memory and copying index data to convert from one type to another.
 */
#if defined(__cplusplus) && !defined(MESHOPTIMIZER_NO_WRAPPERS)
template <typename T>
inline size_t meshopt_generateVertexRemap(unsigned int* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size);
template <typename T>
inline size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count);
template <typename T>
inline void meshopt_remapIndexBuffer(T* destination, const T* indices, size_t index_count, const unsigned int* remap);
template <typename T>
inline void meshopt_generateShadowIndexBuffer(T* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride);
template <typename T>
inline void meshopt_generateShadowIndexBufferMulti(T* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count);
template <typename T>
inline void meshopt_optimizeVertexCache(T* destination, const T* indices, size_t index_count, size_t vertex_count);
template <typename T>
inline void meshopt_optimizeVertexCacheStrip(T* destination, const T* indices, size_t index_count, size_t vertex_count);
template <typename T>
inline void meshopt_optimizeVertexCacheFifo(T* destination, const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size);
template <typename T>
inline void meshopt_optimizeOverdraw(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, float threshold);
template <typename T>
inline size_t meshopt_optimizeVertexFetchRemap(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count);
template <typename T>
inline size_t meshopt_optimizeVertexFetch(void* destination, T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size);
template <typename T>
inline size_t meshopt_encodeIndexBuffer(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count);
template <typename T>
inline int meshopt_decodeIndexBuffer(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size);
template <typename T>
inline size_t meshopt_encodeIndexSequence(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count);
template <typename T>
inline int meshopt_decodeIndexSequence(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size);
template <typename T>
inline size_t meshopt_simplify(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* result_error = 0);
template <typename T>
inline size_t meshopt_simplifySloppy(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* result_error = 0);
template <typename T>
inline size_t meshopt_stripify(T* destination, const T* indices, size_t index_count, size_t vertex_count, T restart_index);
template <typename T>
inline size_t meshopt_unstripify(T* destination, const T* indices, size_t index_count, T restart_index);
template <typename T>
inline meshopt_VertexCacheStatistics meshopt_analyzeVertexCache(const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int buffer_size);
template <typename T>
inline meshopt_OverdrawStatistics meshopt_analyzeOverdraw(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
template <typename T>
inline meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const T* indices, size_t index_count, size_t vertex_count, size_t vertex_size);
template <typename T>
inline size_t meshopt_buildMeshlets(meshopt_Meshlet* destination, const T* indices, size_t index_count, size_t vertex_count, size_t max_vertices, size_t max_triangles);
template <typename T>
inline meshopt_Bounds meshopt_computeClusterBounds(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
template <typename T>
inline void meshopt_spatialSortTriangles(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
#endif

/* Inline implementation */
#ifdef __cplusplus
inline int meshopt_quantizeUnorm(float v, int N)
{
    const float scale = float((1 << N) - 1);

    v = (v >= 0) ? v : 0;
    v = (v <= 1) ? v : 1;

    return int(v * scale + 0.5f);
}

inline int meshopt_quantizeSnorm(float v, int N)
{
    const float scale = float((1 << (N - 1)) - 1);

    float round = (v >= 0 ? 0.5f : -0.5f);

    v = (v >= -1) ? v : -1;
    v = (v <= +1) ? v : +1;

    return int(v * scale + round);
}

inline unsigned short meshopt_quantizeHalf(float v)
{
    union { float f; unsigned int ui; } u = {v};
    unsigned int ui = u.ui;

    int s = (ui >> 16) & 0x8000;
    int em = ui & 0x7fffffff;

    /* bias exponent and round to nearest; 112 is relative exponent bias (127-15) */
    int h = (em - (112 << 23) + (1 << 12)) >> 13;

    /* underflow: flush to zero; 113 encodes exponent -14 */
    h = (em < (113 << 23)) ? 0 : h;

    /* overflow: infinity; 143 encodes exponent 16 */
    h = (em >= (143 << 23)) ? 0x7c00 : h;

    /* NaN; note that we convert all types of NaN to qNaN */
    h = (em > (255 << 23)) ? 0x7e00 : h;

    return (unsigned short)(s | h);
}

inline float meshopt_quantizeFloat(float v, int N)
{
    union { float f; unsigned int ui; } u = {v};
    unsigned int ui = u.ui;

    const int mask = (1 << (23 - N)) - 1;
    const int round = (1 << (23 - N)) >> 1;

    int e = ui & 0x7f800000;
    unsigned int rui = (ui + round) & ~mask;

    /* round all numbers except inf/nan; this is important to make sure nan doesn't overflow into -0 */
    ui = e == 0x7f800000 ? ui : rui;

    /* flush denormals to zero */
    ui = e == 0 ? 0 : ui;

    u.ui = ui;
    return u.f;
}
#endif

/* Internal implementation helpers */
#ifdef __cplusplus
class meshopt_Allocator
{
public:
    template <typename T>
    struct StorageT
    {
        static void* (*allocate)(size_t);
        static void (*deallocate)(void*);
    };

    typedef StorageT<void> Storage;

    meshopt_Allocator()
        : blocks()
        , count(0)
    {
    }

    ~meshopt_Allocator()
    {
        for (size_t i = count; i > 0; --i)
            Storage::deallocate(blocks[i - 1]);
    }

    template <typename T> T* allocate(size_t size)
    {
        assert(count < sizeof(blocks) / sizeof(blocks[0]));
        T* result = static_cast<T*>(Storage::allocate(size > size_t(-1) / sizeof(T) ? size_t(-1) : size * sizeof(T)));
        blocks[count++] = result;
        return result;
    }

private:
    void* blocks[24];
    size_t count;
};

// This makes sure that allocate/deallocate are lazily generated in translation units that need them and are deduplicated by the linker
template <typename T> void* (*meshopt_Allocator::StorageT<T>::allocate)(size_t) = operator new;
template <typename T> void (*meshopt_Allocator::StorageT<T>::deallocate)(void*) = operator delete;
#endif

/* Inline implementation for C++ templated wrappers */
#if defined(__cplusplus) && !defined(MESHOPTIMIZER_NO_WRAPPERS)
template <typename T, bool ZeroCopy = sizeof(T) == sizeof(unsigned int)>
struct meshopt_IndexAdapter;

template <typename T>
struct meshopt_IndexAdapter<T, false>
{
    T* result;
    unsigned int* data;
    size_t count;

    meshopt_IndexAdapter(T* result_, const T* input, size_t count_)
        : result(result_)
        , data(0)
        , count(count_)
    {
        size_t size = count > size_t(-1) / sizeof(unsigned int) ? size_t(-1) : count * sizeof(unsigned int);

        data = static_cast<unsigned int*>(meshopt_Allocator::Storage::allocate(size));

        if (input)
        {
            for (size_t i = 0; i < count; ++i)
                data[i] = input[i];
        }
    }

    ~meshopt_IndexAdapter()
    {
        if (result)
        {
            for (size_t i = 0; i < count; ++i)
                result[i] = T(data[i]);
        }

        meshopt_Allocator::Storage::deallocate(data);
    }
};

template <typename T>
struct meshopt_IndexAdapter<T, true>
{
    unsigned int* data;

    meshopt_IndexAdapter(T* result, const T* input, size_t)
        : data(reinterpret_cast<unsigned int*>(result ? result : const_cast<T*>(input)))
    {
    }
};

template <typename T>
inline size_t meshopt_generateVertexRemap(unsigned int* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size)
{
    meshopt_IndexAdapter<T> in(0, indices, indices ? index_count : 0);

    return meshopt_generateVertexRemap(destination, indices ? in.data : 0, index_count, vertices, vertex_count, vertex_size);
}

template <typename T>
inline size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count)
{
    meshopt_IndexAdapter<T> in(0, indices, indices ? index_count : 0);

    return meshopt_generateVertexRemapMulti(destination, indices ? in.data : 0, index_count, vertex_count, streams, stream_count);
}

template <typename T>
inline void meshopt_remapIndexBuffer(T* destination, const T* indices, size_t index_count, const unsigned int* remap)
{
    meshopt_IndexAdapter<T> in(0, indices, indices ? index_count : 0);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    meshopt_remapIndexBuffer(out.data, indices ? in.data : 0, index_count, remap);
}

template <typename T>
inline void meshopt_generateShadowIndexBuffer(T* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    meshopt_generateShadowIndexBuffer(out.data, in.data, index_count, vertices, vertex_count, vertex_size, vertex_stride);
}

template <typename T>
inline void meshopt_generateShadowIndexBufferMulti(T* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    meshopt_generateShadowIndexBufferMulti(out.data, in.data, index_count, vertex_count, streams, stream_count);
}

template <typename T>
inline void meshopt_optimizeVertexCache(T* destination, const T* indices, size_t index_count, size_t vertex_count)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    meshopt_optimizeVertexCache(out.data, in.data, index_count, vertex_count);
}

template <typename T>
inline void meshopt_optimizeVertexCacheStrip(T* destination, const T* indices, size_t index_count, size_t vertex_count)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    meshopt_optimizeVertexCacheStrip(out.data, in.data, index_count, vertex_count);
}

template <typename T>
inline void meshopt_optimizeVertexCacheFifo(T* destination, const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    meshopt_optimizeVertexCacheFifo(out.data, in.data, index_count, vertex_count, cache_size);
}

template <typename T>
inline void meshopt_optimizeOverdraw(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, float threshold)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    meshopt_optimizeOverdraw(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride, threshold);
}

template <typename T>
inline size_t meshopt_optimizeVertexFetchRemap(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);

    return meshopt_optimizeVertexFetchRemap(destination, in.data, index_count, vertex_count);
}

template <typename T>
inline size_t meshopt_optimizeVertexFetch(void* destination, T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size)
{
    meshopt_IndexAdapter<T> inout(indices, indices, index_count);

    return meshopt_optimizeVertexFetch(destination, inout.data, index_count, vertices, vertex_count, vertex_size);
}

template <typename T>
inline size_t meshopt_encodeIndexBuffer(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);

    return meshopt_encodeIndexBuffer(buffer, buffer_size, in.data, index_count);
}

template <typename T>
inline int meshopt_decodeIndexBuffer(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size)
{
    char index_size_valid[sizeof(T) == 2 || sizeof(T) == 4 ? 1 : -1];
    (void)index_size_valid;

    return meshopt_decodeIndexBuffer(destination, index_count, sizeof(T), buffer, buffer_size);
}

template <typename T>
inline size_t meshopt_encodeIndexSequence(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);

    return meshopt_encodeIndexSequence(buffer, buffer_size, in.data, index_count);
}

template <typename T>
inline int meshopt_decodeIndexSequence(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size)
{
    char index_size_valid[sizeof(T) == 2 || sizeof(T) == 4 ? 1 : -1];
    (void)index_size_valid;

    return meshopt_decodeIndexSequence(destination, index_count, sizeof(T), buffer, buffer_size);
}

template <typename T>
inline size_t meshopt_simplify(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* result_error)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    return meshopt_simplify(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride, target_index_count, target_error, result_error);
}

template <typename T>
inline size_t meshopt_simplifySloppy(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* result_error)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    return meshopt_simplifySloppy(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride, target_index_count, target_error, result_error);
}

template <typename T>
inline size_t meshopt_stripify(T* destination, const T* indices, size_t index_count, size_t vertex_count, T restart_index)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, (index_count / 3) * 5);

    return meshopt_stripify(out.data, in.data, index_count, vertex_count, unsigned(restart_index));
}

template <typename T>
inline size_t meshopt_unstripify(T* destination, const T* indices, size_t index_count, T restart_index)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, (index_count - 2) * 3);

    return meshopt_unstripify(out.data, in.data, index_count, unsigned(restart_index));
}

template <typename T>
inline meshopt_VertexCacheStatistics meshopt_analyzeVertexCache(const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int buffer_size)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);

    return meshopt_analyzeVertexCache(in.data, index_count, vertex_count, cache_size, warp_size, buffer_size);
}

template <typename T>
inline meshopt_OverdrawStatistics meshopt_analyzeOverdraw(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);

    return meshopt_analyzeOverdraw(in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride);
}

template <typename T>
inline meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const T* indices, size_t index_count, size_t vertex_count, size_t vertex_size)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);

    return meshopt_analyzeVertexFetch(in.data, index_count, vertex_count, vertex_size);
}

template <typename T>
inline size_t meshopt_buildMeshlets(meshopt_Meshlet* destination, const T* indices, size_t index_count, size_t vertex_count, size_t max_vertices, size_t max_triangles)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);

    return meshopt_buildMeshlets(destination, in.data, index_count, vertex_count, max_vertices, max_triangles);
}

template <typename T>
inline meshopt_Bounds meshopt_computeClusterBounds(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);

    return meshopt_computeClusterBounds(in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride);
}

template <typename T>
inline void meshopt_spatialSortTriangles(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
    meshopt_IndexAdapter<T> in(0, indices, index_count);
    meshopt_IndexAdapter<T> out(destination, 0, index_count);

    meshopt_spatialSortTriangles(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride);
}
#endif

/**
 * Copyright (c) 2016-2020 Arseny Kapoulkine
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */