// mesh.h
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You
// may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
#ifndef WEBGL_LOADER_MESH_H_
#define WEBGL_LOADER_MESH_H_

#include <ctype.h>
#include <float.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <map>
#include <string>
#include <utility>
#include <vector>
  24. typedef unsigned short uint16;
  25. typedef short int16;
  26. typedef std::vector<float> AttribList;
  27. typedef std::vector<int> IndexList;
  28. typedef std::vector<uint16> QuantizedAttribList;
  29. struct DrawMesh {
  30. // Interleaved vertex format:
  31. // 3-D Position
  32. // 3-D Normal
  33. // 2-D TexCoord
  34. // Note that these
  35. AttribList interleaved_attribs;
  36. // Indices are 0-indexed.
  37. IndexList triangle_indices;
  38. };
  39. void DumpJsonFromQuantizedAttribs(const QuantizedAttribList& attribs) {
  40. puts("var attribs = new Uint16Array([");
  41. for (size_t i = 0; i < attribs.size(); i += 8) {
  42. printf("%u,%hu,%hu,%hu,%hu,%hu,%hu,%hu,\n",
  43. attribs[i + 0], attribs[i + 1], attribs[i + 2], attribs[i + 3],
  44. attribs[i + 4], attribs[i + 5], attribs[i + 6], attribs[i + 7]);
  45. }
  46. puts("]);");
  47. }
  48. void DumpJsonFromInterleavedAttribs(const AttribList& attribs) {
  49. puts("var attribs = new Float32Array([");
  50. for (size_t i = 0; i < attribs.size(); i += 8) {
  51. printf("%f,%f,%f,%f,%f,%f,%f,%f,\n",
  52. attribs[i + 0], attribs[i + 1], attribs[i + 2], attribs[i + 3],
  53. attribs[i + 4], attribs[i + 5], attribs[i + 6], attribs[i + 7]);
  54. }
  55. puts("]);");
  56. }
  57. void DumpJsonFromIndices(const IndexList& indices) {
  58. puts("var indices = new Uint16Array([");
  59. for (size_t i = 0; i < indices.size(); i += 3) {
  60. printf("%d,%d,%d,\n", indices[i + 0], indices[i + 1], indices[i + 2]);
  61. }
  62. puts("]);");
  63. }
  64. // A short list of floats, useful for parsing a single vector
  65. // attribute.
  66. class ShortFloatList {
  67. public:
  68. static const size_t kMaxNumFloats = 4;
  69. ShortFloatList()
  70. : size_(0) { }
  71. // Parse up to kMaxNumFloats from C string.
  72. size_t ParseLine(const char* line) {
  73. for (size_ = 0; size_ != kMaxNumFloats; ++size_) {
  74. char* endptr = NULL;
  75. a_[size_] = strtof(line, &endptr);
  76. if (endptr == NULL || line == endptr) break;
  77. line = endptr;
  78. }
  79. return size_;
  80. }
  81. void AppendTo(AttribList* attribs) const {
  82. attribs->insert(attribs->end(), a_, a_ + size_);
  83. }
  84. bool empty() const { return size_ == 0; }
  85. size_t size() const { return size_; }
  86. private:
  87. float a_[kMaxNumFloats];
  88. size_t size_;
  89. };
  90. class IndexFlattener {
  91. public:
  92. explicit IndexFlattener(size_t num_positions)
  93. : count_(0),
  94. table_(num_positions) {
  95. }
  96. int count() const { return count_; }
  97. // Returns a pair of: < flattened index, newly inserted >.
  98. std::pair<int, bool> GetFlattenedIndex(int position_index,
  99. int texcoord_index,
  100. int normal_index) {
  101. // First, optimistically look up position_index in the table.
  102. IndexType& index = table_[position_index];
  103. if (index.position_or_flat == kIndexUnknown) {
  104. // This is the first time we've seen this position in the table,
  105. // so fill it. Since the table is indexed by position, we can
  106. // use the position_or_flat_index field to store the flat index.
  107. const int flat_index = count_++;
  108. index.position_or_flat = flat_index;
  109. index.texcoord = texcoord_index;
  110. index.normal = normal_index;
  111. return std::make_pair(flat_index, true);
  112. } else if (index.position_or_flat == kIndexNotInTable) {
  113. // There are multiple flattened indices at this position index,
  114. // so resort to the map.
  115. return GetFlattenedIndexFromMap(position_index,
  116. texcoord_index,
  117. normal_index);
  118. } else if (index.texcoord == texcoord_index &&
  119. index.normal == normal_index) {
  120. // The other indices match, so we can use the value cached in
  121. // the table.
  122. return std::make_pair(index.position_or_flat, false);
  123. }
  124. // The other indices don't match, so we mark this table entry,
  125. // and insert both the old and new indices into the map.
  126. const IndexType old_index(position_index, index.texcoord, index.normal);
  127. map_.insert(std::make_pair(old_index, index.position_or_flat));
  128. index.position_or_flat = kIndexNotInTable;
  129. const IndexType new_index(position_index, texcoord_index, normal_index);
  130. const int flat_index = count_++;
  131. map_.insert(std::make_pair(new_index, flat_index));
  132. return std::make_pair(flat_index, true);
  133. }
  134. private:
  135. std::pair<int, bool> GetFlattenedIndexFromMap(int position_index,
  136. int texcoord_index,
  137. int normal_index) {
  138. IndexType index(position_index, texcoord_index, normal_index);
  139. MapType::iterator iter = map_.lower_bound(index);
  140. if (iter == map_.end() || iter->first != index) {
  141. const int flat_index = count_++;
  142. map_.insert(iter, std::make_pair(index, flat_index));
  143. return std::make_pair(flat_index, true);
  144. } else {
  145. return std::make_pair(iter->second, false);
  146. }
  147. }
  148. static const int kIndexUnknown = -1;
  149. static const int kIndexNotInTable = -2;
  150. struct IndexType {
  151. IndexType()
  152. : position_or_flat(kIndexUnknown),
  153. texcoord(kIndexUnknown),
  154. normal(kIndexUnknown)
  155. { }
  156. IndexType(int position_index, int texcoord_index, int normal_index)
  157. : position_or_flat(position_index),
  158. texcoord(texcoord_index),
  159. normal(normal_index)
  160. { }
  161. // I'm being tricky/lazy here. The table_ stores the flattened
  162. // index in the first field, since it is indexed by position. The
  163. // map_ stores position and uses this struct as a key to lookup the
  164. // flattened index.
  165. int position_or_flat;
  166. int texcoord;
  167. int normal;
  168. // An ordering for std::map.
  169. bool operator<(const IndexType& that) const {
  170. if (position_or_flat == that.position_or_flat) {
  171. if (texcoord == that.texcoord) {
  172. return normal < that.normal;
  173. } else {
  174. return texcoord < that.texcoord;
  175. }
  176. } else {
  177. return position_or_flat < that.position_or_flat;
  178. }
  179. }
  180. bool operator==(const IndexType& that) const {
  181. return position_or_flat == that.position_or_flat &&
  182. texcoord == that.texcoord && normal == that.normal;
  183. }
  184. bool operator!=(const IndexType& that) const {
  185. return !operator==(that);
  186. }
  187. };
  188. typedef std::map<IndexType, int> MapType;
  189. int count_;
  190. std::vector<IndexType> table_;
  191. MapType map_;
  192. };
  193. // TODO: consider splitting this into a low-level parser and a high-level
  194. // object.
  195. class WavefrontObjFile {
  196. public:
  197. struct Group {
  198. std::string name;
  199. size_t start, end;
  200. };
  201. typedef std::vector<Group> GroupList;
  202. explicit WavefrontObjFile(FILE* fp) {
  203. ParseFile(fp);
  204. };
  205. const GroupList& groups() const { return groups_; }
  206. // Populate draw_meshes.
  207. void CreateDrawMeshes(std::vector<DrawMesh>* draw_meshes) {
  208. draw_meshes->push_back(DrawMesh());
  209. DrawMesh& draw_mesh = draw_meshes->back();
  210. IndexFlattener flattener(positions_.size() / positionDim());
  211. for (size_t i = 0; i < faces_.size(); i += 3) {
  212. // .OBJ files use 1-based indexing.
  213. const int position_index = faces_[i + 0] - 1;
  214. const int texcoord_index = faces_[i + 1] - 1;
  215. const int normal_index = faces_[i + 2] - 1;
  216. const std::pair<int, bool> flattened = flattener.GetFlattenedIndex(
  217. position_index, texcoord_index, normal_index);
  218. draw_mesh.triangle_indices.push_back(flattened.first);
  219. if (flattened.second) {
  220. for (size_t i = 0; i < positionDim(); ++i) {
  221. draw_mesh.interleaved_attribs.push_back(
  222. positions_[positionDim() * position_index + i]);
  223. }
  224. for (size_t i = 0; i < texcoordDim(); ++i) {
  225. draw_mesh.interleaved_attribs.push_back(
  226. texcoords_[texcoordDim() * texcoord_index + i]);
  227. }
  228. for (size_t i = 0; i < normalDim(); ++i) {
  229. draw_mesh.interleaved_attribs.push_back(
  230. normals_[normalDim() * normal_index + i]);
  231. }
  232. }
  233. }
  234. }
  235. /*
  236. // %z formatting chokes MinGW compiler on Windows :/
  237. // using instead unsigned long
  238. void DumpDebug() const {
  239. printf("positions size: %zu\ntexcoords size: %zu\nnormals size: %zu"
  240. "\nfaces size: %zu\n", positions_.size(), texcoords_.size(),
  241. normals_.size(), faces_.size());
  242. }
  243. */
  244. void DumpDebug() const {
  245. printf("positions size: %lu\ntexcoords size: %lu\nnormals size: %lu"
  246. "\nfaces size: %lu\n", (unsigned long)positions_.size(), (unsigned long)texcoords_.size(),
  247. (unsigned long)normals_.size(), (unsigned long)faces_.size());
  248. }
  249. private:
  250. void ParseFile(FILE* fp) {
  251. // TODO: don't use a fixed-size buffer.
  252. const size_t kLineBufferSize = 256;
  253. char buffer[kLineBufferSize];
  254. unsigned int line_num = 1;
  255. while (fgets(buffer, kLineBufferSize, fp) != NULL) {
  256. const char* stripped = buffer;
  257. while (isspace(*stripped)) {
  258. ++stripped;
  259. }
  260. ParseLine(stripped, line_num++);
  261. }
  262. }
  263. void ParseLine(const char* line, unsigned int line_num) {
  264. switch (*line) {
  265. case 'v':
  266. ParseAttrib(line + 1, line_num);
  267. break;
  268. case 'f':
  269. ParseFace(line + 1, line_num);
  270. break;
  271. case 'g':
  272. ParseGroup(line + 1, line_num);
  273. break;
  274. case '\0':
  275. case '#':
  276. break; // Do nothing for comments or blank lines.
  277. case 'p':
  278. WarnLine("point unsupported", line_num);
  279. break;
  280. case 'l':
  281. WarnLine("line unsupported", line_num);
  282. break;
  283. case 'u':
  284. WarnLine("usemtl (?) unsupported", line_num);
  285. break;
  286. case 'm':
  287. WarnLine("mtllib (?) unsupported", line_num);
  288. break;
  289. case 's':
  290. WarnLine("s unsupported", line_num);
  291. break;
  292. default:
  293. WarnLine("unknown keyword", line_num);
  294. break;
  295. }
  296. }
  297. void ParseAttrib(const char* line, unsigned int line_num) {
  298. ShortFloatList floats;
  299. floats.ParseLine(line + 1);
  300. if (isspace(*line)) {
  301. ParsePosition(floats, line_num);
  302. } else if (*line == 't') {
  303. ParseTexCoord(floats, line_num);
  304. } else if (*line == 'n') {
  305. ParseNormal(floats, line_num);
  306. } else {
  307. WarnLine("unknown attribute format", line_num);
  308. }
  309. }
  310. void ParsePosition(const ShortFloatList& floats, unsigned int line_num) {
  311. if (floats.size() != positionDim()) {
  312. ErrorLine("bad position", line_num);
  313. }
  314. floats.AppendTo(&positions_);
  315. }
  316. void ParseTexCoord(const ShortFloatList& floats, unsigned int line_num) {
  317. if (floats.size() != texcoordDim()) {
  318. ErrorLine("bad texcoord", line_num);
  319. }
  320. floats.AppendTo(&texcoords_);
  321. }
  322. void ParseNormal(const ShortFloatList& floats, unsigned int line_num) {
  323. if (floats.size() != normalDim()) {
  324. ErrorLine("bad normal", line_num);
  325. }
  326. floats.AppendTo(&normals_);
  327. }
  328. // Parses faces and converts to triangle fans. This is not a
  329. // particularly good tesselation in general case, but it is really
  330. // simple, and is perfectly fine for triangles and quads.
  331. void ParseFace(const char* line, unsigned int line_num) {
  332. // Also handle face outlines as faces.
  333. if (*line == 'o') ++line;
  334. // TODO: instead of storing these indices as-is, it might make
  335. // sense to flatten them right away. This can reduce memory
  336. // consumption and improve access locality, especially since .OBJ
  337. // face indices are so needlessly large.
  338. int indices[9] = { 0 };
  339. // The first index acts as the pivot for the triangle fan.
  340. line = ParseIndices(line, line_num, indices + 0, indices + 1, indices + 2);
  341. if (line == NULL) {
  342. ErrorLine("bad first index", line_num);
  343. }
  344. line = ParseIndices(line, line_num, indices + 3, indices + 4, indices + 5);
  345. if (line == NULL) {
  346. ErrorLine("bad second index", line_num);
  347. }
  348. // After the first two indices, each index introduces a new
  349. // triangle to the fan.
  350. while ((line = ParseIndices(line, line_num,
  351. indices + 6, indices + 7, indices + 8))) {
  352. faces_.insert(faces_.end(), indices, indices + 9);
  353. // The most recent vertex is reused for the next triangle.
  354. indices[3] = indices[6];
  355. indices[4] = indices[7];
  356. indices[5] = indices[8];
  357. indices[6] = indices[7] = indices[8] = 0;
  358. }
  359. }
  360. // Parse a single group of indices, separated by slashes ('/').
  361. // TODO: convert negative indices (that is, relative to the end of
  362. // the current vertex positions) to more conventional positive
  363. // indices.
  364. const char* ParseIndices(const char* line, unsigned int line_num,
  365. int* position_index, int* texcoord_index,
  366. int* normal_index) {
  367. int bytes_consumed = 0;
  368. int indices_parsed = sscanf(line, "%d/%d/%d%n",
  369. position_index, texcoord_index, normal_index,
  370. &bytes_consumed);
  371. if (indices_parsed != 3) {
  372. return NULL;
  373. }
  374. if (*position_index <= 0 || *texcoord_index <= 0 || *normal_index <= 0 ) {
  375. ErrorLine("bad index format", line_num);
  376. }
  377. return line + bytes_consumed;
  378. }
  379. void ParseGroup(const char* line, unsigned int line_num) {
  380. WarnLine("group unsupported", line_num);
  381. }
  382. void WarnLine(const char* why, unsigned int line_num) {
  383. fprintf(stderr, "WARNING: %s at line %u\n", why, line_num);
  384. }
  385. void ErrorLine(const char* why, unsigned int line_num) {
  386. fprintf(stderr, "ERROR: %s at line %u\n", why, line_num);
  387. exit(-1);
  388. }
  389. static size_t positionDim() { return 3; }
  390. static size_t texcoordDim() { return 2; }
  391. static size_t normalDim() { return 3; }
  392. AttribList positions_;
  393. AttribList texcoords_;
  394. AttribList normals_;
  395. // Indices are 1-indexed, and per-attrib.
  396. IndexList faces_;
  397. GroupList groups_;
  398. };
  399. // Axis-aligned bounding box
  400. struct AABB {
  401. float mins[3];
  402. float maxes[3];
  403. };
  404. void DumpJsonFromAABB(const AABB& aabb) {
  405. printf("var aabb = { mins: [%f, %f, %f], maxes: [%f, %f, %f] };\n",
  406. aabb.mins[0], aabb.mins[1], aabb.mins[2],
  407. aabb.maxes[0], aabb.maxes[1], aabb.maxes[2]);
  408. }
  409. float UniformScaleFromAABB(const AABB& aabb) {
  410. const float x = aabb.maxes[0] - aabb.mins[0];
  411. const float y = aabb.maxes[1] - aabb.mins[1];
  412. const float z = aabb.maxes[2] - aabb.mins[2];
  413. return (x > y)
  414. ? ((x > z) ? x : z)
  415. : ((y > z) ? y : z);
  416. }
  417. void AABBToCenter(const AABB& aabb, float center[3]) {
  418. for (size_t i = 0; i < 3; ++i) {
  419. center[i] = 0.5*(aabb.mins[i] + aabb.maxes[i]);
  420. }
  421. }
  422. AABB AABBFromAttribs(const AttribList& interleaved_attribs) {
  423. AABB aabb;
  424. for (size_t i = 0; i < 3; ++i) {
  425. aabb.mins[i] = FLT_MAX;
  426. aabb.maxes[i] = -FLT_MAX;
  427. }
  428. for (size_t i = 0; i < interleaved_attribs.size(); i += 8) {
  429. for (size_t j = 0; j < 3; ++j) {
  430. const float attrib = interleaved_attribs[i + j];
  431. if (aabb.mins[j] > attrib) {
  432. aabb.mins[j] = attrib;
  433. }
  434. if (aabb.maxes[j] < attrib) {
  435. aabb.maxes[j] = attrib;
  436. }
  437. }
  438. }
  439. return aabb;
  440. }
  441. uint16 Quantize(float f, float offset, float range, int bits) {
  442. const float f_offset = f + offset;
  443. // Losslessly multiply a float by 1 << bits;
  444. const float f_scaled = ldexpf(f_offset, bits);
  445. // static_cast rounds towards zero (i.e. truncates).
  446. return static_cast<uint16>(f_scaled / range - 0.5f);
  447. }
  448. void AttribsToQuantizedAttribs(const AttribList& interleaved_attribs,
  449. QuantizedAttribList* quantized_attribs) {
  450. const AABB aabb = AABBFromAttribs(interleaved_attribs);
  451. const float scale = UniformScaleFromAABB(aabb);
  452. quantized_attribs->resize(interleaved_attribs.size());
  453. const float offsets[8] = { -aabb.mins[0], -aabb.mins[1], -aabb.mins[2],
  454. 0.0f, 0.0f, 1.f, 1.f, 1.f };
  455. const float scales[8] = { scale, scale, scale, 1.f, 1.f, 2.f, 2.f, 2.f };
  456. const int bits[8] = { 14, 14, 14, 10, 10, 10, 10, 10 };
  457. for (size_t i = 0; i < interleaved_attribs.size(); i += 8) {
  458. for (size_t j = 0; j < 8; ++j) {
  459. quantized_attribs->at(i + j) = Quantize(interleaved_attribs[i + j],
  460. offsets[j], scales[j], bits[j]);
  461. }
  462. }
  463. // dump for reconstruction of real dimensions in JavaScript
  464. // (probably should be embedded as a part of the UTF8 file)
  465. fprintf(stderr, "scale: %f, offsetX: %f, offsetY: %f, offsetZ: %f\n", scale, aabb.mins[0], aabb.mins[1], aabb.mins[2] );
  466. }
// Vertex-cache optimizer. Based on:
// http://home.comcast.net/~tom_forsyth/papers/fast_vert_cache_opt.html
class VertexOptimizer {
 public:
  // TODO: this could easily work with non-quantized attribute lists.
  // NOTE: attribs_ is a reference, so |attribs| must outlive this
  // object; |indices| is copied and consumed by GetOptimizedMesh.
  VertexOptimizer(const QuantizedAttribList& attribs, const IndexList& indices)
      : attribs_(attribs),
        indices_(indices),
        per_vertex_data_(attribs_.size() / 8) {
    // The cache has an extra slot allocated to simplify the logic in
    // InsertIndexToCache.
    for (unsigned int i = 0; i <= kCacheSize; ++i) {
      cache_[i] = kUnknownIndex;
    }
    // Loop through the indices, incrementing active triangle counts.
    for (size_t i = 0; i < indices_.size(); ++i) {
      per_vertex_data_[indices_[i]].active_tris++;
    }
    // Compute initial vertex scores.
    for (size_t i = 0; i < per_vertex_data_.size(); ++i) {
      per_vertex_data_[i].UpdateScore();
    }
  }
  // Writes reordered copies of the mesh into |attribs| and |indices|:
  // triangles are emitted in greedy best-score order and vertices are
  // renumbered in first-use order, so output indices only ever grow by
  // one at a time (the property CompressIndicesToUtf8 relies on).
  void GetOptimizedMesh(QuantizedAttribList* attribs, IndexList* indices) {
    attribs->resize(attribs_.size());
    indices->resize(indices_.size());
    uint16* attribs_out = &attribs->at(0);
    int* indices_out = &indices->at(0);
    int next_unused_index = 0;
    // Consume indices_, one triangle at a time. When a triangle is consumed from
    // the middle of indices_, the last one is copied in to replace it so that we
    // can shrink indices_ from the end.
    while (!indices_.empty()) {
      const size_t best_triangle = FindBestTriangle();
      const size_t last_triangle = indices_.size() - 3;
      // Go through the indices of the best triangle.
      for (size_t i = 0; i < 3; ++i) {
        const int index = indices_[best_triangle + i];
        // After consuming this vertex, copy the corresponding index
        // from the last triangle into this slot.
        indices_[best_triangle + i] = indices_[last_triangle + i];
        per_vertex_data_[index].active_tris--;
        InsertIndexToCache(index);
        const int cached_output_index = per_vertex_data_[index].output_index;
        // Have we seen this index before?
        if (cached_output_index != kUnknownIndex) {
          *indices_out++ = cached_output_index;
          continue;
        }
        // The first time we see an index, not only do we increment
        // next_index counter, but we must also copy the corresponding
        // attributes.
        per_vertex_data_[index].output_index = next_unused_index;
        for (size_t j = 0; j < 8; ++j) {
          *attribs_out++ = attribs_[8*index + j];
        }
        *indices_out++ = next_unused_index++;
      }
      // Remove the last triangle.
      indices_.resize(last_triangle);
    }
  }
 private:
  static const int kUnknownIndex = -1;
  static const uint16 kCacheSize = 32;
  // Per-vertex bookkeeping: remaining triangle count, simulated cache
  // position, output renumbering, and the current greedy score.
  struct VertexData {
    VertexData()
        : active_tris(0),
          cache_tag(kCacheSize),
          output_index(kUnknownIndex),
          score(-1.f)
    { }
    // Recomputes score from cache_tag and active_tris.
    void UpdateScore() {
      if (active_tris <= 0) {
        // No triangles left to emit for this vertex.
        score = -1.f;
        return;
      }
      // TODO: build initial score table.
      if (cache_tag < 3) {
        // The most recent triangle should has a fixed score to
        // discourage generating nothing but really long strips. If we
        // want strips, we should use a different optimizer.
        const float kLastTriScore = 0.75f;
        score = kLastTriScore;
      } else if (cache_tag < kCacheSize) {
        // Points for being recently used.
        const float kScale = 1.f / (kCacheSize - 3);
        const float kCacheDecayPower = 1.5f;
        score = powf(1.f - kScale * (cache_tag - 3), kCacheDecayPower);
      } else {
        // Not in cache.
        score = 0.f;
      }
      // Bonus points for having a low number of tris still to use the
      // vert, so we get rid of lone verts quickly.
      const float kValenceBoostScale = 2.0f;
      const float kValenceBoostPower = 0.5f;
      const float valence_boost = powf(active_tris, -kValenceBoostPower); // rsqrt?
      score += valence_boost * kValenceBoostScale;
    };
    int active_tris;
    unsigned int cache_tag; // == kCacheSize means not in cache.
    int output_index; // For output remapping.
    float score;
  };
  // This also updates the vertex scores!
  void InsertIndexToCache(int index) {
    // Find how recently the vertex was used.
    const unsigned int cache_tag = per_vertex_data_[index].cache_tag;
    // Don't do anything if the vertex is already at the head of the
    // LRU list.
    if (cache_tag == 0) return;
    // Loop through the cache, inserting the index at the front, and
    // bubbling down to where the index was originally found. If the
    // index was not originally in the cache, then it claims to be at
    // the (kCacheSize + 1)th entry, and we use an extra slot to make
    // that case simpler.
    int to_insert = index;
    for (unsigned int i = 0; i <= cache_tag; ++i) {
      const int current_index = cache_[i];
      // Update cross references between the entry of the cache and
      // the per-vertex data.
      cache_[i] = to_insert;
      per_vertex_data_[to_insert].cache_tag = i;
      per_vertex_data_[to_insert].UpdateScore();
      // No need to continue if we find an empty entry.
      if (current_index == kUnknownIndex) {
        break;
      }
      to_insert = current_index;
    }
  }
  // Returns the index into indices_ of the first vertex of the
  // highest-scoring remaining triangle.
  size_t FindBestTriangle() {
    float best_score = -FLT_MAX;
    size_t best_triangle = 0;
    // TODO: without a boundary structure, this performs a linear
    // scan, which makes Tom Forsyth's linear algorithm run in
    // quadratic time!
    for (size_t i = 0; i < indices_.size(); i += 3) {
      const float score =
          per_vertex_data_[indices_[i + 0]].score +
          per_vertex_data_[indices_[i + 1]].score +
          per_vertex_data_[indices_[i + 2]].score;
      if (score > best_score) {
        best_score = score;
        best_triangle = i;
      }
    }
    return best_triangle;
  }
  const QuantizedAttribList& attribs_;
  IndexList indices_;  // Local copy; consumed during optimization.
  std::vector<VertexData> per_vertex_data_;
  int cache_[kCacheSize + 1];  // Simulated post-transform cache, LRU order.
};
  622. uint16 ZigZag(int16 word) {
  623. return (word >> 15) ^ (word << 1);
  624. }
// Minimal assertion macro: on failure, prints the source line number
// and the failing predicate text to stderr, then exits. The trailing
// `else` binds the semicolon that follows each CHECK(...) use, so the
// macro behaves like a single statement even in unbraced if/else
// bodies.
#define CHECK(PRED) if (!(PRED)) { \
  fprintf(stderr, "%d: CHECK failed: " #PRED "\n", __LINE__); \
  exit(-1); } else
  628. bool Uint16ToUtf8(uint16 word, std::vector<char>* utf8) {
  629. const char kMoreBytesPrefix = static_cast<char>(0x80);
  630. const uint16 kMoreBytesMask = 0x3F;
  631. if (word < 0x80) {
  632. utf8->push_back(static_cast<char>(word));
  633. } else if (word < 0x800) {
  634. const char kTwoBytePrefix = static_cast<char>(0xC0);
  635. utf8->push_back(kTwoBytePrefix + static_cast<char>(word >> 6));
  636. utf8->push_back(kMoreBytesPrefix +
  637. static_cast<char>(word & kMoreBytesMask));
  638. } else if (word < 0xF800) {
  639. const char kThreeBytePrefix = static_cast<char>(0xE0);
  640. // We can only encode 65535 - 2048 values because of illegal UTF-8
  641. // characters, such as surrogate pairs in [0xD800, 0xDFFF].
  642. //TODO: what about other characters, like reversed-BOM 0xFFFE?
  643. if (word >= 0xD800) {
  644. // Shift the result to avoid the surrogate pair range.
  645. word += 0x0800;
  646. }
  647. utf8->push_back(kThreeBytePrefix + static_cast<char>(word >> 12));
  648. utf8->push_back(kMoreBytesPrefix +
  649. static_cast<char>((word >> 6) & kMoreBytesMask));
  650. utf8->push_back(kMoreBytesPrefix +
  651. static_cast<char>(word & kMoreBytesMask));
  652. } else {
  653. return false;
  654. }
  655. return true;
  656. }
  657. void CompressIndicesToUtf8(const IndexList& list, std::vector<char>* utf8) {
  658. // For indices, we don't do delta from the most recent index, but
  659. // from the high water mark. The assumption is that the high water
  660. // mark only ever moves by one at a time. Foruntately, the vertex
  661. // optimizer does that for us, to optimize for per-transform vertex
  662. // fetch order.
  663. uint16 index_high_water_mark = 0;
  664. for (size_t i = 0; i < list.size(); ++i) {
  665. const int index = list[i];
  666. CHECK(index >= 0);
  667. CHECK(index <= index_high_water_mark);
  668. CHECK(Uint16ToUtf8(index_high_water_mark - index, utf8));
  669. if (index == index_high_water_mark) {
  670. ++index_high_water_mark;
  671. }
  672. }
  673. }
  674. void CompressQuantizedAttribsToUtf8(const QuantizedAttribList& attribs,
  675. std::vector<char>* utf8) {
  676. for (size_t i = 0; i < 8; ++i) {
  677. // Use a transposed representation, and delta compression.
  678. uint16 prev = 0;
  679. for (size_t j = i; j < attribs.size(); j += 8) {
  680. const uint16 word = attribs[j];
  681. const uint16 za = ZigZag(static_cast<int16>(word - prev));
  682. prev = word;
  683. CHECK(Uint16ToUtf8(za, utf8));
  684. }
  685. }
  686. }
  687. void CompressMeshToFile(const QuantizedAttribList& attribs,
  688. const IndexList& indices,
  689. const char* fn) {
  690. CHECK((attribs.size() & 7) == 0);
  691. const size_t num_verts = attribs.size() / 8;
  692. fprintf(stderr, "num_verts: %lu", (unsigned long)num_verts);
  693. CHECK(num_verts > 0);
  694. CHECK(num_verts < 65536);
  695. std::vector<char> utf8;
  696. CHECK(Uint16ToUtf8(static_cast<uint16>(num_verts - 1), &utf8));
  697. CompressQuantizedAttribsToUtf8(attribs, &utf8);
  698. CompressIndicesToUtf8(indices, &utf8);
  699. FILE* fp = fopen(fn, "wb");
  700. fwrite(&utf8[0], 1, utf8.size(), fp);
  701. fclose(fp);
  702. }
  703. #endif // WEBGL_LOADER_MESH_H_