zohtwo.cpp 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883
  1. /*
  2. Copyright 2007 nVidia, Inc.
  3. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
  4. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
  5. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
  6. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  7. See the License for the specific language governing permissions and limitations under the License.
  8. */
  9. // two regions zoh compress/decompress code
  10. // Thanks to Jacob Munkberg ([email protected]) for the shortcut of using SVD to do the equivalent of principal components analysis
  11. /* optimization algorithm
  12. get initial float endpoints
  13. convert endpoints using 16 bit precision, transform, and get bit delta. choose likely endpoint compression candidates.
  14. note that there will be 1 or 2 candidates; 2 will be chosen when the delta values are close to the max possible.
  15. for each EC candidate in order from max precision to smaller precision
  16. convert endpoints using the appropriate precision.
  17. optimize the endpoints and minimize square error. save the error and index assignments. apply index compression as well.
  18. (thus the endpoints and indices are in final form.)
  19. transform and get bit delta.
  20. if the bit delta fits, exit
  21. if we ended up with no candidates somehow, choose the tail set of EC candidates and retry. this should happen hardly ever.
  22. add a state variable so nvDebugCheck can verify we only do this once.
  23. convert to bit stream.
  24. return the error.
  25. Global optimization
  26. order all tiles based on their errors
  27. do something special for high-error tiles
  28. the goal here is to try to avoid tiling artifacts. but I think this is a research problem. let's just generate an error image...
  29. display an image that shows partitioning and precision selected for each tile
  30. */
  31. #include "bits.h"
  32. #include "tile.h"
  33. #include "zoh.h"
  34. #include "zoh_utils.h"
  35. #include "nvmath/Fitting.h"
  36. #include "nvmath/Vector.inl"
  37. #include <string.h> // strlen
  38. #include <float.h> // FLT_MAX
  39. using namespace nv;
  40. using namespace ZOH;
  41. #define NINDICES 8
  42. #define INDEXBITS 3
  43. #define HIGH_INDEXBIT (1<<(INDEXBITS-1))
  44. #define DENOM (NINDICES-1)
  45. // WORK: determine optimal traversal pattern to search for best shape -- what does the error curve look like?
  46. // i.e. can we search shapes in a particular order so we can see the global error minima easily and
  47. // stop without having to touch all shapes?
  48. #include "shapes_two.h"
  49. // use only the first 32 available shapes
  50. #undef NSHAPES
  51. #undef SHAPEBITS
  52. #define NSHAPES 32
  53. #define SHAPEBITS 5
  54. #define POS_TO_X(pos) ((pos)&3)
  55. #define POS_TO_Y(pos) (((pos)>>2)&3)
  56. #define NDELTA 4
// Per-channel precision pattern: the bit widths of the four endpoint
// fields (w, x, y, z) for one color channel of a two-region block.
struct Chanpat
{
    int prec[NDELTA]; // precision pattern for one channel
};
// Description of one two-region ZOH (BC6H) block mode: per-channel field
// precisions, whether endpoints are stored as signed deltas, the mode value,
// and the bit-layout string used to (de)serialize the header.
struct Pattern
{
    Chanpat chan[NCHANNELS]; // allow different bit patterns per channel -- but we still want constant precision per channel
    int transformed;         // if 0, deltas are unsigned and no transform; otherwise, signed and transformed
    int mode;                // associated mode value
    int modebits;            // number of mode bits
    const char *encoding;    // verilog description of encoding for this mode
};
  69. #define MAXMODEBITS 5
  70. #define MAXMODES (1<<MAXMODEBITS)
  71. #define NPATTERNS 10
// Table of the two-region modes. Each row lists, in order: red prec[4],
// green prec[4], blue prec[4], transformed flag, mode value, mode bit count,
// and the header bit-layout string (consumed back-to-front by write_header/
// read_header). E.g. "11,5,5,5" means an 11-bit base endpoint with 5-bit
// deltas for that channel.
static const Pattern patterns[NPATTERNS] =
{
    11,5,5,5, 11,4,4,4, 11,4,4,4, 1, 0x02, 5, "d[4:0],bz[3],rz[4:0],bz[2],ry[4:0],by[3:0],bz[1],bw[10],bx[3:0],gz[3:0],bz[0],gw[10],gx[3:0],gy[3:0],rw[10],rx[4:0],bw[9:0],gw[9:0],rw[9:0],m[4:0]",
    11,4,4,4, 11,5,5,5, 11,4,4,4, 1, 0x06, 5, "d[4:0],bz[3],gy[4],rz[3:0],bz[2],bz[0],ry[3:0],by[3:0],bz[1],bw[10],bx[3:0],gz[3:0],gw[10],gx[4:0],gy[3:0],gz[4],rw[10],rx[3:0],bw[9:0],gw[9:0],rw[9:0],m[4:0]",
    11,4,4,4, 11,4,4,4, 11,5,5,5, 1, 0x0a, 5, "d[4:0],bz[3],bz[4],rz[3:0],bz[2:1],ry[3:0],by[3:0],bw[10],bx[4:0],gz[3:0],bz[0],gw[10],gx[3:0],gy[3:0],by[4],rw[10],rx[3:0],bw[9:0],gw[9:0],rw[9:0],m[4:0]",
    10,5,5,5, 10,5,5,5, 10,5,5,5, 1, 0x00, 2, "d[4:0],bz[3],rz[4:0],bz[2],ry[4:0],by[3:0],bz[1],bx[4:0],gz[3:0],bz[0],gx[4:0],gy[3:0],gz[4],rx[4:0],bw[9:0],gw[9:0],rw[9:0],bz[4],by[4],gy[4],m[1:0]",
    9,5,5,5, 9,5,5,5, 9,5,5,5, 1, 0x0e, 5, "d[4:0],bz[3],rz[4:0],bz[2],ry[4:0],by[3:0],bz[1],bx[4:0],gz[3:0],bz[0],gx[4:0],gy[3:0],gz[4],rx[4:0],bz[4],bw[8:0],gy[4],gw[8:0],by[4],rw[8:0],m[4:0]",
    8,6,6,6, 8,5,5,5, 8,5,5,5, 1, 0x12, 5, "d[4:0],rz[5:0],ry[5:0],by[3:0],bz[1],bx[4:0],gz[3:0],bz[0],gx[4:0],gy[3:0],rx[5:0],bz[4:3],bw[7:0],gy[4],bz[2],gw[7:0],by[4],gz[4],rw[7:0],m[4:0]",
    8,5,5,5, 8,6,6,6, 8,5,5,5, 1, 0x16, 5, "d[4:0],bz[3],rz[4:0],bz[2],ry[4:0],by[3:0],bz[1],bx[4:0],gz[3:0],gx[5:0],gy[3:0],gz[4],rx[4:0],bz[4],gz[5],bw[7:0],gy[4],gy[5],gw[7:0],by[4],bz[0],rw[7:0],m[4:0]",
    8,5,5,5, 8,5,5,5, 8,6,6,6, 1, 0x1a, 5, "d[4:0],bz[3],rz[4:0],bz[2],ry[4:0],by[3:0],bx[5:0],gz[3:0],bz[0],gx[4:0],gy[3:0],gz[4],rx[4:0],bz[4],bz[5],bw[7:0],gy[4],by[5],gw[7:0],by[4],bz[1],rw[7:0],m[4:0]",
    7,6,6,6, 7,6,6,6, 7,6,6,6, 1, 0x01, 2, "d[4:0],rz[5:0],ry[5:0],by[3:0],bx[5:0],gz[3:0],gx[5:0],gy[3:0],rx[5:0],bz[4],bz[5],bz[3],bw[6:0],gy[4],bz[2],by[5],gw[6:0],by[4],bz[1:0],rw[6:0],gz[5:4],gy[5],m[1:0]",
    6,6,6,6, 6,6,6,6, 6,6,6,6, 0, 0x1e, 5, "d[4:0],rz[5:0],ry[5:0],by[3:0],bx[5:0],gz[3:0],gx[5:0],gy[3:0],rx[5:0],bz[4],bz[5],bz[3],gz[5],bw[5:0],gy[4],bz[2],by[5],gy[5],gw[5:0],by[4],bz[1:0],gz[4],rw[5:0],m[4:0]",
};
// mapping of mode to the corresponding index in pattern
// UNUSED ZOH MODES are 0x13, 0x17, 0x1b, 0x1f -- return -2 for these
// -1 entries are mode values that can never be produced by the 2/5-bit
// mode read in read_header and are therefore unreachable.
static const int mode_to_pat[MAXMODES] = {
    3, // 0x00
    8, // 0x01
    0, // 0x02
    -1,-1,-1,
    1, // 0x06
    -1,-1,-1,
    2, // 0x0a
    -1,-1,-1,
    4, // 0x0e
    -1,-1,-1,
    5, // 0x12
    -2,-1,-1,
    6, // 0x16
    -2,-1,-1,
    7, // 0x1a
    -2,-1,-1,
    9, // 0x1e
    -2
};
  107. #define R_0(ep) (ep)[0].A[i]
  108. #define R_1(ep) (ep)[0].B[i]
  109. #define R_2(ep) (ep)[1].A[i]
  110. #define R_3(ep) (ep)[1].B[i]
  111. #define MASK(n) ((1<<(n))-1)
  112. // compress endpoints
  113. static void compress_endpts(const IntEndpts in[NREGIONS_TWO], ComprEndpts out[NREGIONS_TWO], const Pattern &p)
  114. {
  115. if (p.transformed)
  116. {
  117. for (int i=0; i<NCHANNELS; ++i)
  118. {
  119. R_0(out) = R_0(in) & MASK(p.chan[i].prec[0]);
  120. R_1(out) = (R_1(in) - R_0(in)) & MASK(p.chan[i].prec[1]);
  121. R_2(out) = (R_2(in) - R_0(in)) & MASK(p.chan[i].prec[2]);
  122. R_3(out) = (R_3(in) - R_0(in)) & MASK(p.chan[i].prec[3]);
  123. }
  124. }
  125. else
  126. {
  127. for (int i=0; i<NCHANNELS; ++i)
  128. {
  129. R_0(out) = R_0(in) & MASK(p.chan[i].prec[0]);
  130. R_1(out) = R_1(in) & MASK(p.chan[i].prec[1]);
  131. R_2(out) = R_2(in) & MASK(p.chan[i].prec[2]);
  132. R_3(out) = R_3(in) & MASK(p.chan[i].prec[3]);
  133. }
  134. }
  135. }
// decompress endpoints
// Inverse of compress_endpts: rebuild the full-precision integer endpoints
// from the stored fields, honoring the pattern's transform flag and the
// signed/unsigned texture format.
static void decompress_endpts(const ComprEndpts in[NREGIONS_TWO], IntEndpts out[NREGIONS_TWO], const Pattern &p)
{
    bool issigned = Utils::FORMAT == SIGNED_F16;

    if (p.transformed)
    {
        for (int i=0; i<NCHANNELS; ++i)
        {
            // base endpoint is stored at full channel precision
            R_0(out) = issigned ? SIGN_EXTEND(R_0(in),p.chan[i].prec[0]) : R_0(in);
            int t;
            // each remaining endpoint is a signed delta from the base:
            // sign-extend it to its own precision, add the base, and wrap
            // to the base precision; signed formats then re-sign-extend
            t = SIGN_EXTEND(R_1(in), p.chan[i].prec[1]);
            t = (t + R_0(in)) & MASK(p.chan[i].prec[0]);
            R_1(out) = issigned ? SIGN_EXTEND(t,p.chan[i].prec[0]) : t;
            t = SIGN_EXTEND(R_2(in), p.chan[i].prec[2]);
            t = (t + R_0(in)) & MASK(p.chan[i].prec[0]);
            R_2(out) = issigned ? SIGN_EXTEND(t,p.chan[i].prec[0]) : t;
            t = SIGN_EXTEND(R_3(in), p.chan[i].prec[3]);
            t = (t + R_0(in)) & MASK(p.chan[i].prec[0]);
            R_3(out) = issigned ? SIGN_EXTEND(t,p.chan[i].prec[0]) : t;
        }
    }
    else
    {
        // untransformed: each endpoint is stored directly at its own precision
        for (int i=0; i<NCHANNELS; ++i)
        {
            R_0(out) = issigned ? SIGN_EXTEND(R_0(in),p.chan[i].prec[0]) : R_0(in);
            R_1(out) = issigned ? SIGN_EXTEND(R_1(in),p.chan[i].prec[1]) : R_1(in);
            R_2(out) = issigned ? SIGN_EXTEND(R_2(in),p.chan[i].prec[2]) : R_2(in);
            R_3(out) = issigned ? SIGN_EXTEND(R_3(in),p.chan[i].prec[3]) : R_3(in);
        }
    }
}
  168. static void quantize_endpts(const FltEndpts endpts[NREGIONS_TWO], int prec, IntEndpts q_endpts[NREGIONS_TWO])
  169. {
  170. for (int region = 0; region < NREGIONS_TWO; ++region)
  171. {
  172. q_endpts[region].A[0] = Utils::quantize(endpts[region].A.x, prec);
  173. q_endpts[region].A[1] = Utils::quantize(endpts[region].A.y, prec);
  174. q_endpts[region].A[2] = Utils::quantize(endpts[region].A.z, prec);
  175. q_endpts[region].B[0] = Utils::quantize(endpts[region].B.x, prec);
  176. q_endpts[region].B[1] = Utils::quantize(endpts[region].B.y, prec);
  177. q_endpts[region].B[2] = Utils::quantize(endpts[region].B.z, prec);
  178. }
  179. }
  180. // swap endpoints as needed to ensure that the indices at index_positions have a 0 high-order bit
  181. static void swap_indices(IntEndpts endpts[NREGIONS_TWO], int indices[Tile::TILE_H][Tile::TILE_W], int shapeindex)
  182. {
  183. for (int region = 0; region < NREGIONS_TWO; ++region)
  184. {
  185. int position = SHAPEINDEX_TO_COMPRESSED_INDICES(shapeindex,region);
  186. int x = POS_TO_X(position);
  187. int y = POS_TO_Y(position);
  188. nvDebugCheck(REGION(x,y,shapeindex) == region); // double check the table
  189. if (indices[y][x] & HIGH_INDEXBIT)
  190. {
  191. // high bit is set, swap the endpts and indices for this region
  192. int t;
  193. for (int i=0; i<NCHANNELS; ++i)
  194. {
  195. t = endpts[region].A[i]; endpts[region].A[i] = endpts[region].B[i]; endpts[region].B[i] = t;
  196. }
  197. for (int y = 0; y < Tile::TILE_H; y++)
  198. for (int x = 0; x < Tile::TILE_W; x++)
  199. if (REGION(x,y,shapeindex) == region)
  200. indices[y][x] = NINDICES - 1 - indices[y][x];
  201. }
  202. }
  203. }
  204. // endpoints fit only if the compression was lossless
  205. static bool endpts_fit(const IntEndpts orig[NREGIONS_TWO], const ComprEndpts compressed[NREGIONS_TWO], const Pattern &p)
  206. {
  207. IntEndpts uncompressed[NREGIONS_TWO];
  208. decompress_endpts(compressed, uncompressed, p);
  209. for (int j=0; j<NREGIONS_TWO; ++j)
  210. {
  211. for (int i=0; i<NCHANNELS; ++i)
  212. {
  213. if (orig[j].A[i] != uncompressed[j].A[i]) return false;
  214. if (orig[j].B[i] != uncompressed[j].B[i]) return false;
  215. }
  216. }
  217. return true;
  218. }
// Write the block header -- mode bits, shape index, and compressed endpoints --
// laid out according to the pattern's encoding string.
static void write_header(const ComprEndpts endpts[NREGIONS_TWO], int shapeindex, const Pattern &p, Bits &out)
{
    // interpret the verilog backwards and process it
    int m = p.mode;
    int d = shapeindex;
    // field names follow the spec: w/x = region 0 A/B, y/z = region 1 A/B
    int rw = endpts[0].A[0], rx = endpts[0].B[0], ry = endpts[1].A[0], rz = endpts[1].B[0];
    int gw = endpts[0].A[1], gx = endpts[0].B[1], gy = endpts[1].A[1], gz = endpts[1].B[1];
    int bw = endpts[0].A[2], bx = endpts[0].B[2], by = endpts[1].A[2], bz = endpts[1].B[2];

    // consume the encoding string from its end (LSB-first in the block)
    int ptr = int(strlen(p.encoding));
    while (ptr)
    {
        Field field;
        int endbit, len;

        // !!!UNDONE: get rid of string parsing!!!
        Utils::parse(p.encoding, ptr, field, endbit, len);
        switch(field)
        {
        case FIELD_M: out.write( m >> endbit, len); break;
        case FIELD_D: out.write( d >> endbit, len); break;
        case FIELD_RW: out.write(rw >> endbit, len); break;
        case FIELD_RX: out.write(rx >> endbit, len); break;
        case FIELD_RY: out.write(ry >> endbit, len); break;
        case FIELD_RZ: out.write(rz >> endbit, len); break;
        case FIELD_GW: out.write(gw >> endbit, len); break;
        case FIELD_GX: out.write(gx >> endbit, len); break;
        case FIELD_GY: out.write(gy >> endbit, len); break;
        case FIELD_GZ: out.write(gz >> endbit, len); break;
        case FIELD_BW: out.write(bw >> endbit, len); break;
        case FIELD_BX: out.write(bx >> endbit, len); break;
        case FIELD_BY: out.write(by >> endbit, len); break;
        case FIELD_BZ: out.write(bz >> endbit, len); break;
        default: nvUnreachable();
        }
    }
}
// Read and decode the block header into compressed endpoints, the shape index,
// and the pattern. Returns false if the block uses a reserved mode.
static bool read_header(Bits &in, ComprEndpts endpts[NREGIONS_TWO], int &shapeindex, Pattern &p)
{
    // reading isn't quite symmetric with writing -- we don't know the encoding until we decode the mode
    // modes 0x00 and 0x01 use a 2-bit mode field; all others use 5 bits
    int mode = in.read(2);
    if (mode != 0x00 && mode != 0x01)
        mode = (in.read(3) << 2) | mode;

    int pat_index = mode_to_pat[mode];

    if (pat_index == -2)
        return false; // reserved mode found

    nvDebugCheck (pat_index >= 0 && pat_index < NPATTERNS);
    nvDebugCheck (in.getptr() == patterns[pat_index].modebits);

    p = patterns[pat_index];

    int d;
    int rw, rx, ry, rz;
    int gw, gx, gy, gz;
    int bw, bx, by, bz;

    d = 0;
    rw = rx = ry = rz = 0;
    gw = gx = gy = gz = 0;
    bw = bx = by = bz = 0;

    int ptr = int(strlen(p.encoding));

    while (ptr)
    {
        Field field;
        int endbit, len;

        // !!!UNDONE: get rid of string parsing!!!
        Utils::parse(p.encoding, ptr, field, endbit, len);

        switch(field)
        {
        case FIELD_M: break; // already processed so ignore
        case FIELD_D: d |= in.read(len) << endbit; break;
        case FIELD_RW: rw |= in.read(len) << endbit; break;
        case FIELD_RX: rx |= in.read(len) << endbit; break;
        case FIELD_RY: ry |= in.read(len) << endbit; break;
        case FIELD_RZ: rz |= in.read(len) << endbit; break;
        case FIELD_GW: gw |= in.read(len) << endbit; break;
        case FIELD_GX: gx |= in.read(len) << endbit; break;
        case FIELD_GY: gy |= in.read(len) << endbit; break;
        case FIELD_GZ: gz |= in.read(len) << endbit; break;
        case FIELD_BW: bw |= in.read(len) << endbit; break;
        case FIELD_BX: bx |= in.read(len) << endbit; break;
        case FIELD_BY: by |= in.read(len) << endbit; break;
        case FIELD_BZ: bz |= in.read(len) << endbit; break;
        default: nvUnreachable();
        }
    }

    // the header occupies 82 bits: a 128-bit block minus 46 index bits
    nvDebugCheck (in.getptr() == 128 - 46);

    shapeindex = d;
    endpts[0].A[0] = rw; endpts[0].B[0] = rx; endpts[1].A[0] = ry; endpts[1].B[0] = rz;
    endpts[0].A[1] = gw; endpts[0].B[1] = gx; endpts[1].A[1] = gy; endpts[1].B[1] = gz;
    endpts[0].A[2] = bw; endpts[0].B[2] = bx; endpts[1].A[2] = by; endpts[1].B[2] = bz;

    return true;
}
  307. static void write_indices(const int indices[Tile::TILE_H][Tile::TILE_W], int shapeindex, Bits &out)
  308. {
  309. int positions[NREGIONS_TWO];
  310. for (int r = 0; r < NREGIONS_TWO; ++r)
  311. positions[r] = SHAPEINDEX_TO_COMPRESSED_INDICES(shapeindex,r);
  312. for (int pos = 0; pos < Tile::TILE_TOTAL; ++pos)
  313. {
  314. int x = POS_TO_X(pos);
  315. int y = POS_TO_Y(pos);
  316. bool match = false;
  317. for (int r = 0; r < NREGIONS_TWO; ++r)
  318. if (positions[r] == pos) { match = true; break; }
  319. out.write(indices[y][x], INDEXBITS - (match ? 1 : 0));
  320. }
  321. }
  322. static void emit_block(const ComprEndpts compr_endpts[NREGIONS_TWO], int shapeindex, const Pattern &p, const int indices[Tile::TILE_H][Tile::TILE_W], char *block)
  323. {
  324. Bits out(block, ZOH::BITSIZE);
  325. write_header(compr_endpts, shapeindex, p, out);
  326. write_indices(indices, shapeindex, out);
  327. nvDebugCheck(out.getptr() == ZOH::BITSIZE);
  328. }
  329. static void generate_palette_quantized(const IntEndpts &endpts, int prec, Vector3 palette[NINDICES])
  330. {
  331. // scale endpoints
  332. int a, b; // really need a IntVector3...
  333. a = Utils::unquantize(endpts.A[0], prec);
  334. b = Utils::unquantize(endpts.B[0], prec);
  335. // interpolate
  336. for (int i = 0; i < NINDICES; ++i)
  337. palette[i].x = float(Utils::finish_unquantize(Utils::lerp(a, b, i, DENOM), prec));
  338. a = Utils::unquantize(endpts.A[1], prec);
  339. b = Utils::unquantize(endpts.B[1], prec);
  340. // interpolate
  341. for (int i = 0; i < NINDICES; ++i)
  342. palette[i].y = float(Utils::finish_unquantize(Utils::lerp(a, b, i, DENOM), prec));
  343. a = Utils::unquantize(endpts.A[2], prec);
  344. b = Utils::unquantize(endpts.B[2], prec);
  345. // interpolate
  346. for (int i = 0; i < NINDICES; ++i)
  347. palette[i].z = float(Utils::finish_unquantize(Utils::lerp(a, b, i, DENOM), prec));
  348. }
  349. static void read_indices(Bits &in, int shapeindex, int indices[Tile::TILE_H][Tile::TILE_W])
  350. {
  351. int positions[NREGIONS_TWO];
  352. for (int r = 0; r < NREGIONS_TWO; ++r)
  353. positions[r] = SHAPEINDEX_TO_COMPRESSED_INDICES(shapeindex,r);
  354. for (int pos = 0; pos < Tile::TILE_TOTAL; ++pos)
  355. {
  356. int x = POS_TO_X(pos);
  357. int y = POS_TO_Y(pos);
  358. bool match = false;
  359. for (int r = 0; r < NREGIONS_TWO; ++r)
  360. if (positions[r] == pos) { match = true; break; }
  361. indices[y][x]= in.read(INDEXBITS - (match ? 1 : 0));
  362. }
  363. }
// Decompress one two-region block into the tile: read the header, rebuild
// the endpoints, generate a palette per region, then look each pixel up by
// its stored index.
void ZOH::decompresstwo(const char *block, Tile &t)
{
    Bits in(block, ZOH::BITSIZE);

    Pattern p;
    IntEndpts endpts[NREGIONS_TWO];
    ComprEndpts compr_endpts[NREGIONS_TWO];
    int shapeindex;

    if (!read_header(in, compr_endpts, shapeindex, p))
    {
        // reserved mode, return all zeroes
        for (int y = 0; y < Tile::TILE_H; y++)
            for (int x = 0; x < Tile::TILE_W; x++)
                t.data[y][x] = Vector3(0.0f);
        return;
    }

    decompress_endpts(compr_endpts, endpts, p);

    Vector3 palette[NREGIONS_TWO][NINDICES];
    for (int r = 0; r < NREGIONS_TWO; ++r)
        generate_palette_quantized(endpts[r], p.chan[0].prec[0], &palette[r][0]);

    int indices[Tile::TILE_H][Tile::TILE_W];
    read_indices(in, shapeindex, indices);

    nvDebugCheck(in.getptr() == ZOH::BITSIZE); // the entire block must be consumed

    // lookup
    for (int y = 0; y < Tile::TILE_H; y++)
        for (int x = 0; x < Tile::TILE_W; x++)
            t.data[y][x] = palette[REGION(x,y,shapeindex)][indices[y][x]];
}
  391. // given a collection of colors and quantized endpoints, generate a palette, choose best entries, and return a single toterr
  392. static float map_colors(const Vector3 colors[], const float importance[], int np, const IntEndpts &endpts, int prec)
  393. {
  394. Vector3 palette[NINDICES];
  395. float toterr = 0;
  396. Vector3 err;
  397. generate_palette_quantized(endpts, prec, palette);
  398. for (int i = 0; i < np; ++i)
  399. {
  400. float err, besterr;
  401. besterr = Utils::norm(colors[i], palette[0]) * importance[i];
  402. for (int j = 1; j < NINDICES && besterr > 0; ++j)
  403. {
  404. err = Utils::norm(colors[i], palette[j]) * importance[i];
  405. if (err > besterr) // error increased, so we're done searching
  406. break;
  407. if (err < besterr)
  408. besterr = err;
  409. }
  410. toterr += besterr;
  411. }
  412. return toterr;
  413. }
  414. // assign indices given a tile, shape, and quantized endpoints, return toterr for each region
  415. static void assign_indices(const Tile &tile, int shapeindex, IntEndpts endpts[NREGIONS_TWO], int prec,
  416. int indices[Tile::TILE_H][Tile::TILE_W], float toterr[NREGIONS_TWO])
  417. {
  418. // build list of possibles
  419. Vector3 palette[NREGIONS_TWO][NINDICES];
  420. for (int region = 0; region < NREGIONS_TWO; ++region)
  421. {
  422. generate_palette_quantized(endpts[region], prec, &palette[region][0]);
  423. toterr[region] = 0;
  424. }
  425. Vector3 err;
  426. for (int y = 0; y < tile.size_y; y++)
  427. for (int x = 0; x < tile.size_x; x++)
  428. {
  429. int region = REGION(x,y,shapeindex);
  430. float err, besterr;
  431. besterr = Utils::norm(tile.data[y][x], palette[region][0]);
  432. indices[y][x] = 0;
  433. for (int i = 1; i < NINDICES && besterr > 0; ++i)
  434. {
  435. err = Utils::norm(tile.data[y][x], palette[region][i]);
  436. if (err > besterr) // error increased, so we're done searching
  437. break;
  438. if (err < besterr)
  439. {
  440. besterr = err;
  441. indices[y][x] = i;
  442. }
  443. }
  444. toterr[region] += besterr;
  445. }
  446. }
// Perturb a single channel of one endpoint (A if do_b==0, else B) by
// decreasing power-of-two steps, keeping any step that lowers the mapped
// error; returns the best error found, leaving the winner in new_endpts.
static float perturb_one(const Vector3 colors[], const float importance[], int np, int ch, int prec, const IntEndpts &old_endpts, IntEndpts &new_endpts,
                         float old_err, int do_b)
{
    // we have the old endpoints: old_endpts
    // we have the perturbed endpoints: new_endpts
    // we have the temporary endpoints: temp_endpts
    IntEndpts temp_endpts;
    float min_err = old_err; // start with the best current error
    int beststep; // only read when 'improved' was set in the same iteration

    // copy real endpoints so we can perturb them
    for (int i=0; i<NCHANNELS; ++i) { temp_endpts.A[i] = new_endpts.A[i] = old_endpts.A[i]; temp_endpts.B[i] = new_endpts.B[i] = old_endpts.B[i]; }

    // do a logarithmic search for the best error for this endpoint (which)
    for (int step = 1 << (prec-1); step; step >>= 1)
    {
        bool improved = false;
        for (int sign = -1; sign <= 1; sign += 2)
        {
            if (do_b == 0)
            {
                temp_endpts.A[ch] = new_endpts.A[ch] + sign * step;
                // skip perturbations that leave the representable range
                if (temp_endpts.A[ch] < 0 || temp_endpts.A[ch] >= (1 << prec))
                    continue;
            }
            else
            {
                temp_endpts.B[ch] = new_endpts.B[ch] + sign * step;
                if (temp_endpts.B[ch] < 0 || temp_endpts.B[ch] >= (1 << prec))
                    continue;
            }

            float err = map_colors(colors, importance, np, temp_endpts, prec);

            if (err < min_err)
            {
                improved = true;
                min_err = err;
                beststep = sign * step;
            }
        }
        // if this was an improvement, move the endpoint and continue search from there
        if (improved)
        {
            if (do_b == 0)
                new_endpts.A[ch] += beststep;
            else
                new_endpts.B[ch] += beststep;
        }
    }
    return min_err;
}
// Optimize one region's endpoints against its pixels: per channel, find the
// more promising endpoint to perturb first, then alternate A/B perturbations
// until neither improves the error.
static void optimize_one(const Vector3 colors[], const float importance[], int np, float orig_err, const IntEndpts &orig_endpts, int prec, IntEndpts &opt_endpts)
{
    float opt_err = orig_err;
    for (int ch = 0; ch < NCHANNELS; ++ch)
    {
        opt_endpts.A[ch] = orig_endpts.A[ch];
        opt_endpts.B[ch] = orig_endpts.B[ch];
    }
    /*
    err0 = perturb(rgb0, delta0)
    err1 = perturb(rgb1, delta1)
    if (err0 < err1)
        if (err0 >= initial_error) break
        rgb0 += delta0
        next = 1
    else
        if (err1 >= initial_error) break
        rgb1 += delta1
        next = 0
    initial_err = map()
    for (;;)
        err = perturb(next ? rgb1:rgb0, delta)
        if (err >= initial_err) break
        next? rgb1 : rgb0 += delta
        initial_err = err
    */
    IntEndpts new_a, new_b;
    IntEndpts new_endpt;
    int do_b;

    // now optimize each channel separately
    for (int ch = 0; ch < NCHANNELS; ++ch)
    {
        // figure out which endpoint when perturbed gives the most improvement and start there
        // if we just alternate, we can easily end up in a local minima
        float err0 = perturb_one(colors, importance, np, ch, prec, opt_endpts, new_a, opt_err, 0); // perturb endpt A
        float err1 = perturb_one(colors, importance, np, ch, prec, opt_endpts, new_b, opt_err, 1); // perturb endpt B

        if (err0 < err1)
        {
            if (err0 >= opt_err)
                continue; // no improvement on this channel at all
            opt_endpts.A[ch] = new_a.A[ch];
            opt_err = err0;
            do_b = 1; // do B next
        }
        else
        {
            if (err1 >= opt_err)
                continue;
            opt_endpts.B[ch] = new_b.B[ch];
            opt_err = err1;
            do_b = 0; // do A next
        }

        // now alternate endpoints and keep trying until there is no improvement
        for (;;)
        {
            float err = perturb_one(colors, importance, np, ch, prec, opt_endpts, new_endpt, opt_err, do_b);
            if (err >= opt_err)
                break;
            if (do_b == 0)
                opt_endpts.A[ch] = new_endpt.A[ch];
            else
                opt_endpts.B[ch] = new_endpt.B[ch];
            opt_err = err;
            do_b = 1 - do_b; // now move the other endpoint
        }
    }
}
  562. static void optimize_endpts(const Tile &tile, int shapeindex, const float orig_err[NREGIONS_TWO],
  563. const IntEndpts orig_endpts[NREGIONS_TWO], int prec, IntEndpts opt_endpts[NREGIONS_TWO])
  564. {
  565. Vector3 pixels[Tile::TILE_TOTAL];
  566. float importance[Tile::TILE_TOTAL];
  567. float err = 0;
  568. for (int region=0; region<NREGIONS_TWO; ++region)
  569. {
  570. // collect the pixels in the region
  571. int np = 0;
  572. for (int y = 0; y < tile.size_y; y++)
  573. for (int x = 0; x < tile.size_x; x++)
  574. if (REGION(x,y,shapeindex) == region)
  575. {
  576. pixels[np] = tile.data[y][x];
  577. importance[np] = tile.importance_map[y][x];
  578. ++np;
  579. }
  580. optimize_one(pixels, importance, np, orig_err[region], orig_endpts[region], prec, opt_endpts[region]);
  581. }
  582. }
  583. /* optimization algorithm
  584. for each pattern
  585. convert endpoints using pattern precision
  586. assign indices and get initial error
  587. compress indices (and possibly reorder endpoints)
  588. transform endpoints
  589. if transformed endpoints fit pattern
  590. get original endpoints back
  591. optimize endpoints, get new endpoints, new indices, and new error // new error will almost always be better
  592. compress new indices
  593. transform new endpoints
  594. if new endpoints fit pattern AND if error is improved
  595. emit compressed block with new data
  596. else
  597. emit compressed block with original data // to try to preserve maximum endpoint precision
  598. */
// Refine the rough endpoints for the chosen shape: walk the patterns from
// highest to lowest precision, and for the first pattern the (unoptimized)
// endpoints fit losslessly, emit either the optimized block (if it still
// fits and improved the error) or the original one. Returns the total error.
float ZOH::refinetwo(const Tile &tile, int shapeindex_best, const FltEndpts endpts[NREGIONS_TWO], char *block)
{
    float orig_err[NREGIONS_TWO], opt_err[NREGIONS_TWO], orig_toterr, opt_toterr;
    IntEndpts orig_endpts[NREGIONS_TWO], opt_endpts[NREGIONS_TWO];
    ComprEndpts compr_orig[NREGIONS_TWO], compr_opt[NREGIONS_TWO];
    int orig_indices[Tile::TILE_H][Tile::TILE_W], opt_indices[Tile::TILE_H][Tile::TILE_W];

    for (int sp = 0; sp < NPATTERNS; ++sp)
    {
        // precisions for all channels need to be the same
        for (int i=1; i<NCHANNELS; ++i) nvDebugCheck (patterns[sp].chan[0].prec[0] == patterns[sp].chan[i].prec[0]);

        quantize_endpts(endpts, patterns[sp].chan[0].prec[0], orig_endpts);
        assign_indices(tile, shapeindex_best, orig_endpts, patterns[sp].chan[0].prec[0], orig_indices, orig_err);
        swap_indices(orig_endpts, orig_indices, shapeindex_best);
        compress_endpts(orig_endpts, compr_orig, patterns[sp]);

        if (endpts_fit(orig_endpts, compr_orig, patterns[sp]))
        {
            optimize_endpts(tile, shapeindex_best, orig_err, orig_endpts, patterns[sp].chan[0].prec[0], opt_endpts);
            assign_indices(tile, shapeindex_best, opt_endpts, patterns[sp].chan[0].prec[0], opt_indices, opt_err);
            swap_indices(opt_endpts, opt_indices, shapeindex_best);
            compress_endpts(opt_endpts, compr_opt, patterns[sp]);

            orig_toterr = opt_toterr = 0;
            for (int i=0; i < NREGIONS_TWO; ++i) { orig_toterr += orig_err[i]; opt_toterr += opt_err[i]; }

            if (endpts_fit(opt_endpts, compr_opt, patterns[sp]) && opt_toterr < orig_toterr)
            {
                emit_block(compr_opt, shapeindex_best, patterns[sp], opt_indices, block);
                return opt_toterr;
            }
            else
            {
                // either it stopped fitting when we optimized it, or there was no improvement
                // so go back to the unoptimized endpoints which we know will fit
                emit_block(compr_orig, shapeindex_best, patterns[sp], orig_indices, block);
                return orig_toterr;
            }
        }
    }
    // unreachable in practice: the last pattern's untransformed 6,6,6,6
    // precision always fits
    nvAssert(false); //throw "No candidate found, should never happen (refinetwo.)";
    return FLT_MAX;
}
  638. static void generate_palette_unquantized(const FltEndpts endpts[NREGIONS_TWO], Vector3 palette[NREGIONS_TWO][NINDICES])
  639. {
  640. for (int region = 0; region < NREGIONS_TWO; ++region)
  641. for (int i = 0; i < NINDICES; ++i)
  642. palette[region][i] = Utils::lerp(endpts[region].A, endpts[region].B, i, DENOM);
  643. }
  644. // generate a palette from unquantized endpoints, then pick best palette color for all pixels in each region, return toterr for all regions combined
  645. static float map_colors(const Tile &tile, int shapeindex, const FltEndpts endpts[NREGIONS_TWO])
  646. {
  647. // build list of possibles
  648. Vector3 palette[NREGIONS_TWO][NINDICES];
  649. generate_palette_unquantized(endpts, palette);
  650. float toterr = 0;
  651. Vector3 err;
  652. for (int y = 0; y < tile.size_y; y++)
  653. for (int x = 0; x < tile.size_x; x++)
  654. {
  655. int region = REGION(x,y,shapeindex);
  656. float err, besterr;
  657. besterr = Utils::norm(tile.data[y][x], palette[region][0]) * tile.importance_map[y][x];
  658. for (int i = 1; i < NINDICES && besterr > 0; ++i)
  659. {
  660. err = Utils::norm(tile.data[y][x], palette[region][i]) * tile.importance_map[y][x];
  661. if (err > besterr) // error increased, so we're done searching
  662. break;
  663. if (err < besterr)
  664. besterr = err;
  665. }
  666. toterr += besterr;
  667. }
  668. return toterr;
  669. }
// Produce rough (unquantized) endpoints for every region of the given shape
// by fitting a line through each region's pixels via principal component
// analysis, and return the total mapping error for the shape.
float ZOH::roughtwo(const Tile &tile, int shapeindex, FltEndpts endpts[NREGIONS_TWO])
{
    for (int region=0; region<NREGIONS_TWO; ++region)
    {
        int np = 0;
        Vector3 colors[Tile::TILE_TOTAL];
        Vector3 mean(0,0,0);

        // gather this region's pixels and accumulate their mean
        for (int y = 0; y < tile.size_y; y++)
            for (int x = 0; x < tile.size_x; x++)
                if (REGION(x,y,shapeindex) == region)
                {
                    colors[np] = tile.data[y][x];
                    mean += tile.data[y][x];
                    ++np;
                }

        // handle simple cases: 0, 1, or 2 pixels need no line fit
        if (np == 0)
        {
            Vector3 zero(0,0,0);
            endpts[region].A = zero;
            endpts[region].B = zero;
            continue;
        }
        else if (np == 1)
        {
            endpts[region].A = colors[0];
            endpts[region].B = colors[0];
            continue;
        }
        else if (np == 2)
        {
            endpts[region].A = colors[0];
            endpts[region].B = colors[1];
            continue;
        }

        mean /= float(np);

        Vector3 direction = Fit::computePrincipalComponent_EigenSolver(np, colors);

        // project each pixel value along the principal direction
        float minp = FLT_MAX, maxp = -FLT_MAX;
        for (int i = 0; i < np; i++)
        {
            float dp = dot(colors[i]-mean, direction);
            if (dp < minp) minp = dp;
            if (dp > maxp) maxp = dp;
        }

        // choose as endpoints 2 points along the principal direction that span the projections of all of the pixel values
        endpts[region].A = mean + minp*direction;
        endpts[region].B = mean + maxp*direction;

        // clamp endpoints
        // the argument for clamping is that the actual endpoints need to be clamped and thus we need to choose the best
        // shape based on endpoints being clamped
        Utils::clamp(endpts[region].A);
        Utils::clamp(endpts[region].B);
    }

    return map_colors(tile, shapeindex, endpts);
}
  726. float ZOH::compresstwo(const Tile &t, char *block)
  727. {
  728. int shapeindex_best = 0;
  729. FltEndpts endptsbest[NREGIONS_TWO], tempendpts[NREGIONS_TWO];
  730. float msebest = FLT_MAX;
  731. /*
  732. collect the mse values that are within 5% of the best values
  733. optimize each one and choose the best
  734. */
  735. // hack for now -- just use the best value WORK
  736. for (int i=0; i<NSHAPES && msebest>0.0; ++i)
  737. {
  738. float mse = roughtwo(t, i, tempendpts);
  739. if (mse < msebest)
  740. {
  741. msebest = mse;
  742. shapeindex_best = i;
  743. memcpy(endptsbest, tempendpts, sizeof(endptsbest));
  744. }
  745. }
  746. return refinetwo(t, shapeindex_best, endptsbest, block);
  747. }