avpcl_mode6.cpp
/*
Copyright 2007 nVidia, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

See the License for the specific language governing permissions and limitations under the License.
*/

// Thanks to Jacob Munkberg ([email protected]) for the shortcut of using SVD to do the equivalent of principal components analysis

// x1000000 7777.1x2 4bi  (mode prefix bits, two RGBA 7777 endpoints each with a shared lsb, 4-bit indices)
#include "bits.h"
#include "tile.h"
#include "avpcl.h"
#include "nvcore/debug.h"
#include "nvmath/vector.inl"
#include "nvmath/matrix.inl"
#include "nvmath/fitting.h"
#include "avpcl_utils.h"
#include "endpts.h"

#include <string.h>
#include <float.h>

using namespace nv;
using namespace AVPCL;
#define NLSBMODES 4 // number of different lsb modes per region. since we have two .1 lsbs per region, that can have 4 values

#define NINDICES 16
#define INDEXBITS 4
#define HIGH_INDEXBIT (1<<(INDEXBITS-1))
#define DENOM (NINDICES-1)
#define BIAS (DENOM/2)

#define NSHAPES 1

static int shapes[NSHAPES] =
{
    0x0000,
};

#define REGION(x,y,shapeindex) ((shapes[shapeindex]&(1<<(15-(x)-4*(y))))!=0)

#define NREGIONS 1
#define NBITSIZES (NREGIONS*2)
#define ABITINDEX(region) (2*(region)+0)
#define BBITINDEX(region) (2*(region)+1)
struct ChanBits
{
    int nbitsizes[NBITSIZES]; // bitsizes for one channel
};

struct Pattern
{
    ChanBits chan[NCHANNELS_RGBA]; // bit patterns used per channel
    int mode;                      // associated mode value
    int modebits;                  // number of mode bits
    const char *encoding;          // verilog description of encoding for this mode
};

#define NPATTERNS 1

static Pattern patterns[NPATTERNS] =
{
    // red  green  blue  alpha  mode  mb  verilog
    7,7,    7,7,   7,7,  7,7,   0x40, 7,  "",
};
struct RegionPrec
{
    int endpt_a_prec[NCHANNELS_RGBA];
    int endpt_b_prec[NCHANNELS_RGBA];
};

struct PatternPrec
{
    RegionPrec region_precs[NREGIONS];
};

// this is the precision for each channel and region
// NOTE: this MUST match the corresponding data in "patterns" above -- WARNING: there is NO nvAssert to check this!
static PatternPrec pattern_precs[NPATTERNS] =
{
    7,7,7,7, 7,7,7,7,
};
// return # of bits needed to store n. handle signed or unsigned cases properly
static int nbits(int n, bool issigned)
{
    int nb;
    if (n == 0)
        return 0; // no bits needed for 0, signed or not
    else if (n > 0)
    {
        for (nb = 0; n; ++nb, n >>= 1) ;
        return nb + (issigned ? 1 : 0);
    }
    else
    {
        nvAssert(issigned);
        for (nb = 0; n < -1; ++nb, n >>= 1) ;
        return nb + 1;
    }
}
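// a few worked examples of nbits() for illustration:
//   nbits(5, false) == 3  (101b)
//   nbits(5, true)  == 4  (sign bit + 101b)
//   nbits(-2, true) == 2  (10b in two's complement)
//   nbits(0, x)     == 0  for either x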
/*
we're using this table to assign lsbs
abgr  >=2  correct
0000   0    0
0001   0    0
0010   0    0
0011   1    x1
0100   0    0
0101   1    x1
0110   1    x1
0111   1    1
1000   0    0
1001   1    x0
1010   1    x0
1011   1    1
1100   1    x0
1101   1    1
1110   1    1
1111   1    1

we need 8 0's and 8 1's. the x's can be either 0 or 1 as long as you get 8/8.
I choose to assign the lsbs so that the rgb channels are as good as possible.
*/

// 8888 -> 7777.1, use the "correct" column above to assign the lsb
static void compress_one(const IntEndptsRGBA& endpts, IntEndptsRGBA_2& compr_endpts)
{
    int onescnt;

    onescnt = 0;
    for (int j = 0; j < NCHANNELS_RGBA; ++j)
    {
        // ignore the alpha channel in the count
        onescnt += (j == CHANNEL_A) ? 0 : (endpts.A[j] & 1);
        compr_endpts.A[j] = endpts.A[j] >> 1;
        nvAssert(compr_endpts.A[j] < 128);
    }
    compr_endpts.a_lsb = onescnt >= 2;

    onescnt = 0;
    for (int j = 0; j < NCHANNELS_RGBA; ++j)
    {
        onescnt += (j == CHANNEL_A) ? 0 : (endpts.B[j] & 1);
        compr_endpts.B[j] = endpts.B[j] >> 1;
        nvAssert(compr_endpts.B[j] < 128);
    }
    compr_endpts.b_lsb = onescnt >= 2;
}
static void uncompress_one(const IntEndptsRGBA_2& compr_endpts, IntEndptsRGBA& endpts)
{
    for (int j = 0; j < NCHANNELS_RGBA; ++j)
    {
        endpts.A[j] = (compr_endpts.A[j] << 1) | compr_endpts.a_lsb;
        endpts.B[j] = (compr_endpts.B[j] << 1) | compr_endpts.b_lsb;
    }
}
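// round-trip example: endpoint A = (255,255,255,255) has rgb low bits 1,1,1 -> onescnt = 3 >= 2, so
// a_lsb = 1, each channel stores 255>>1 = 127, and uncompress_one() rebuilds (127<<1)|1 = 255 exactly.
// since alpha doesn't vote, a channel whose low bit disagrees with the shared lsb is off by at most 1.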
static void uncompress_endpoints(const IntEndptsRGBA_2 compr_endpts[NREGIONS], IntEndptsRGBA endpts[NREGIONS])
{
    for (int i = 0; i < NREGIONS; ++i)
        uncompress_one(compr_endpts[i], endpts[i]);
}

static void compress_endpoints(const IntEndptsRGBA endpts[NREGIONS], IntEndptsRGBA_2 compr_endpts[NREGIONS])
{
    for (int i = 0; i < NREGIONS; ++i)
        compress_one(endpts[i], compr_endpts[i]);
}

static void quantize_endpts(const FltEndpts endpts[NREGIONS], const PatternPrec &pattern_prec, IntEndptsRGBA_2 q_endpts[NREGIONS])
{
    IntEndptsRGBA full_endpts[NREGIONS];

    for (int region = 0; region < NREGIONS; ++region)
    {
        full_endpts[region].A[0] = Utils::quantize(endpts[region].A.x, pattern_prec.region_precs[region].endpt_a_prec[0]+1); // +1 since we are in uncompressed space
        full_endpts[region].A[1] = Utils::quantize(endpts[region].A.y, pattern_prec.region_precs[region].endpt_a_prec[1]+1);
        full_endpts[region].A[2] = Utils::quantize(endpts[region].A.z, pattern_prec.region_precs[region].endpt_a_prec[2]+1);
        full_endpts[region].A[3] = Utils::quantize(endpts[region].A.w, pattern_prec.region_precs[region].endpt_a_prec[3]+1);
        full_endpts[region].B[0] = Utils::quantize(endpts[region].B.x, pattern_prec.region_precs[region].endpt_b_prec[0]+1);
        full_endpts[region].B[1] = Utils::quantize(endpts[region].B.y, pattern_prec.region_precs[region].endpt_b_prec[1]+1);
        full_endpts[region].B[2] = Utils::quantize(endpts[region].B.z, pattern_prec.region_precs[region].endpt_b_prec[2]+1);
        full_endpts[region].B[3] = Utils::quantize(endpts[region].B.w, pattern_prec.region_precs[region].endpt_b_prec[3]+1);
        compress_one(full_endpts[region], q_endpts[region]);
    }
}
// swap endpoints as needed to ensure that the index at each region's anchor position has a 0 high-order bit
// a linear tile index runs from 0 at x=0,y=0 to 15 at x=3,y=3, so y = (index >> 2) & 3 and x = index & 3
static void swap_indices(IntEndptsRGBA_2 endpts[NREGIONS], int indices[Tile::TILE_H][Tile::TILE_W], int shapeindex)
{
    int index_positions[NREGIONS];

    index_positions[0] = 0; // since WLOG we have the high bit of the shapes at 0

    for (int region = 0; region < NREGIONS; ++region)
    {
        int x = index_positions[region] & 3;
        int y = (index_positions[region] >> 2) & 3;
        nvAssert(REGION(x,y,shapeindex) == region); // double check the table

        if (indices[y][x] & HIGH_INDEXBIT)
        {
            // high bit is set, swap the endpts and indices for this region
            int t;
            for (int i = 0; i < NCHANNELS_RGBA; ++i)
            {
                t = endpts[region].A[i]; endpts[region].A[i] = endpts[region].B[i]; endpts[region].B[i] = t;
            }
            t = endpts[region].a_lsb; endpts[region].a_lsb = endpts[region].b_lsb; endpts[region].b_lsb = t;

            for (int y = 0; y < Tile::TILE_H; y++)
                for (int x = 0; x < Tile::TILE_W; x++)
                    if (REGION(x,y,shapeindex) == region)
                        indices[y][x] = NINDICES - 1 - indices[y][x];
        }
    }
}
static bool endpts_fit(IntEndptsRGBA_2 endpts[NREGIONS], const Pattern &p)
{
    return true;
}
static void write_header(const IntEndptsRGBA_2 endpts[NREGIONS], int shapeindex, const Pattern &p, Bits &out)
{
    out.write(p.mode, p.modebits);

    for (int j = 0; j < NCHANNELS_RGBA; ++j)
        for (int i = 0; i < NREGIONS; ++i)
        {
            out.write(endpts[i].A[j], p.chan[j].nbitsizes[ABITINDEX(i)]);
            out.write(endpts[i].B[j], p.chan[j].nbitsizes[BBITINDEX(i)]);
        }

    for (int i = 0; i < NREGIONS; ++i)
    {
        out.write(endpts[i].a_lsb, 1);
        out.write(endpts[i].b_lsb, 1);
    }

    nvAssert(out.getptr() == 65);
}
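// mode 6 block layout as written here and in write_indices() -- 128 bits total:
//   7 bits   mode prefix (p.mode = 0x40, p.modebits = 7)
//   56 bits  endpoints: for each channel r,g,b,a in turn, A (7 bits) then B (7 bits)
//   2 bits   shared lsbs a_lsb, b_lsb (hence the getptr() == 65 assert above)
//   63 bits  indices: 3 bits for anchor index 0, 4 bits for each of indices 1..15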
static void read_header(Bits &in, IntEndptsRGBA_2 endpts[NREGIONS], int &shapeindex, Pattern &p, int &pat_index)
{
    int mode = AVPCL::getmode(in);

    pat_index = 0;
    nvAssert(pat_index >= 0 && pat_index < NPATTERNS);
    nvAssert(in.getptr() == patterns[pat_index].modebits);

    p = patterns[pat_index];

    shapeindex = 0; // mode 6 has a single region, so there is no shape field to read

    for (int j = 0; j < NCHANNELS_RGBA; ++j)
        for (int i = 0; i < NREGIONS; ++i)
        {
            endpts[i].A[j] = in.read(p.chan[j].nbitsizes[ABITINDEX(i)]);
            endpts[i].B[j] = in.read(p.chan[j].nbitsizes[BBITINDEX(i)]);
        }

    for (int i = 0; i < NREGIONS; ++i)
    {
        endpts[i].a_lsb = in.read(1);
        endpts[i].b_lsb = in.read(1);
    }

    nvAssert(in.getptr() == 65);
}
static void write_indices(const int indices[Tile::TILE_H][Tile::TILE_W], int shapeindex, Bits &out)
{
    nvAssert((indices[0][0] & HIGH_INDEXBIT) == 0);

    // the index we shorten is always index 0
    for (int i = 0; i < Tile::TILE_TOTAL; ++i)
    {
        if (i == 0)
            out.write(indices[i>>2][i&3], INDEXBITS-1); // write bits [2:0]
        else
            out.write(indices[i>>2][i&3], INDEXBITS);   // write bits [3:0]
    }
}
static void read_indices(Bits &in, int shapeindex, int indices[Tile::TILE_H][Tile::TILE_W])
{
    // the index we shorten is always index 0
    for (int i = 0; i < Tile::TILE_TOTAL; ++i)
    {
        if (i == 0)
            indices[i>>2][i&3] = in.read(INDEXBITS-1); // read bits [2:0]
        else
            indices[i>>2][i&3] = in.read(INDEXBITS);   // read bits [3:0]
    }
}
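// the anchor trick is what makes the bit budget work: swap_indices() guarantees index 0's high bit
// is 0, so it needs only INDEXBITS-1 = 3 bits. 3 + 15*4 = 63 index bits + 65 header bits = 128 bits.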
static void emit_block(const IntEndptsRGBA_2 endpts[NREGIONS], int shapeindex, const Pattern &p, const int indices[Tile::TILE_H][Tile::TILE_W], char *block)
{
    Bits out(block, AVPCL::BITSIZE);

    write_header(endpts, shapeindex, p, out);
    write_indices(indices, shapeindex, out);

    nvAssert(out.getptr() == AVPCL::BITSIZE);
}
static void generate_palette_quantized(const IntEndptsRGBA_2 &endpts_2, const RegionPrec &region_prec, Vector4 palette[NINDICES])
{
    IntEndptsRGBA endpts;

    uncompress_one(endpts_2, endpts);

    // scale endpoints
    int a, b; // really need an IntVec4...

    a = Utils::unquantize(endpts.A[0], region_prec.endpt_a_prec[0]+1); // +1 since we are in uncompressed space
    b = Utils::unquantize(endpts.B[0], region_prec.endpt_b_prec[0]+1);

    // interpolate
    for (int i = 0; i < NINDICES; ++i)
        palette[i].x = float(Utils::lerp(a, b, i, BIAS, DENOM));

    a = Utils::unquantize(endpts.A[1], region_prec.endpt_a_prec[1]+1);
    b = Utils::unquantize(endpts.B[1], region_prec.endpt_b_prec[1]+1);

    // interpolate
    for (int i = 0; i < NINDICES; ++i)
        palette[i].y = float(Utils::lerp(a, b, i, BIAS, DENOM));

    a = Utils::unquantize(endpts.A[2], region_prec.endpt_a_prec[2]+1);
    b = Utils::unquantize(endpts.B[2], region_prec.endpt_b_prec[2]+1);

    // interpolate
    for (int i = 0; i < NINDICES; ++i)
        palette[i].z = float(Utils::lerp(a, b, i, BIAS, DENOM));

    a = Utils::unquantize(endpts.A[3], region_prec.endpt_a_prec[3]+1);
    b = Utils::unquantize(endpts.B[3], region_prec.endpt_b_prec[3]+1);

    // interpolate
    for (int i = 0; i < NINDICES; ++i)
        palette[i].w = float(Utils::lerp(a, b, i, BIAS, DENOM));
}
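// conceptually, palette entry i sits at fraction i/DENOM = i/15 of the way from endpoint a to
// endpoint b, with BIAS = 7 providing round-to-nearest in the integer math; the exact fixed-point
// weighting lives in Utils::lerp (the BC7 spec defines it with a 4-bit weight table).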
void AVPCL::decompress_mode6(const char *block, Tile &t)
{
    Bits in(block, AVPCL::BITSIZE);

    Pattern p;
    IntEndptsRGBA_2 endpts[NREGIONS];
    int shapeindex, pat_index;

    read_header(in, endpts, shapeindex, p, pat_index);

    Vector4 palette[NREGIONS][NINDICES];
    for (int r = 0; r < NREGIONS; ++r)
        generate_palette_quantized(endpts[r], pattern_precs[pat_index].region_precs[r], &palette[r][0]);

    int indices[Tile::TILE_H][Tile::TILE_W];

    read_indices(in, shapeindex, indices);

    nvAssert(in.getptr() == AVPCL::BITSIZE);

    // lookup
    for (int y = 0; y < Tile::TILE_H; y++)
        for (int x = 0; x < Tile::TILE_W; x++)
            t.data[y][x] = palette[REGION(x,y,shapeindex)][indices[y][x]];
}
// given a collection of colors and quantized endpoints, generate a palette, choose the best palette
// entry for each color, and return the total error
static float map_colors(const Vector4 colors[], const float importance[], int np, const IntEndptsRGBA_2 &endpts, const RegionPrec &region_prec, float current_err, int indices[Tile::TILE_TOTAL])
{
    Vector4 palette[NINDICES];
    float toterr = 0;

    generate_palette_quantized(endpts, region_prec, palette);

    for (int i = 0; i < np; ++i)
    {
        float err, besterr = FLT_MAX;

        for (int j = 0; j < NINDICES && besterr > 0; ++j)
        {
            err = !AVPCL::flag_premult ? Utils::metric4(colors[i], palette[j]) :
                                         Utils::metric4premult(colors[i], palette[j]);

            if (err > besterr) // error increased, so we're done searching
                break;
            if (err < besterr)
            {
                besterr = err;
                indices[i] = j;
            }
        }
        toterr += besterr;

        // check for early exit
        if (toterr > current_err)
        {
            // fill out bogus index values so it's initialized at least
            for (int k = i; k < np; ++k)
                indices[k] = -1;

            return FLT_MAX;
        }
    }
    return toterr;
}
// assign indices given a tile, shape, and quantized endpoints, return toterr for each region
static void assign_indices(const Tile &tile, int shapeindex, IntEndptsRGBA_2 endpts[NREGIONS], const PatternPrec &pattern_prec,
                           int indices[Tile::TILE_H][Tile::TILE_W], float toterr[NREGIONS])
{
    // build list of possibles
    Vector4 palette[NREGIONS][NINDICES];

    for (int region = 0; region < NREGIONS; ++region)
    {
        generate_palette_quantized(endpts[region], pattern_prec.region_precs[region], &palette[region][0]);
        toterr[region] = 0;
    }

    for (int y = 0; y < tile.size_y; y++)
        for (int x = 0; x < tile.size_x; x++)
        {
            int region = REGION(x,y,shapeindex);
            float err, besterr = FLT_MAX;

            for (int i = 0; i < NINDICES && besterr > 0; ++i)
            {
                err = !AVPCL::flag_premult ? Utils::metric4(tile.data[y][x], palette[region][i]) :
                                             Utils::metric4premult(tile.data[y][x], palette[region][i]);

                if (err > besterr) // error increased, so we're done searching
                    break;
                if (err < besterr)
                {
                    besterr = err;
                    indices[y][x] = i;
                }
            }
            toterr[region] += besterr;
        }
}
// note: indices are valid only if the value returned is less than old_err; otherwise they contain -1's
// this function returns either old_err or a smaller value (if it was successful in improving the error)
static float perturb_one(const Vector4 colors[], const float importance[], int np, int ch, const RegionPrec &region_prec, const IntEndptsRGBA_2 &old_endpts, IntEndptsRGBA_2 &new_endpts,
                         float old_err, int do_b, int indices[Tile::TILE_TOTAL])
{
    // we have the old endpoints: old_endpts
    // we have the perturbed endpoints: new_endpts
    // we have the temporary endpoints: temp_endpts

    IntEndptsRGBA_2 temp_endpts;
    float min_err = old_err; // start with the best current error
    int beststep = 0;
    int temp_indices[Tile::TILE_TOTAL];

    for (int i = 0; i < np; ++i)
        indices[i] = -1;

    // copy real endpoints so we can perturb them
    temp_endpts = new_endpts = old_endpts;

    int prec = do_b ? region_prec.endpt_b_prec[ch] : region_prec.endpt_a_prec[ch];

    // do a logarithmic search for the best error for this endpoint (do_b selects A or B)
    for (int step = 1 << (prec-1); step; step >>= 1)
    {
        bool improved = false;
        for (int sign = -1; sign <= 1; sign += 2)
        {
            if (do_b == 0)
            {
                temp_endpts.A[ch] = new_endpts.A[ch] + sign * step;
                if (temp_endpts.A[ch] < 0 || temp_endpts.A[ch] >= (1 << prec))
                    continue;
            }
            else
            {
                temp_endpts.B[ch] = new_endpts.B[ch] + sign * step;
                if (temp_endpts.B[ch] < 0 || temp_endpts.B[ch] >= (1 << prec))
                    continue;
            }

            float err = map_colors(colors, importance, np, temp_endpts, region_prec, min_err, temp_indices);

            if (err < min_err)
            {
                improved = true;
                min_err = err;
                beststep = sign * step;
                for (int i = 0; i < np; ++i)
                    indices[i] = temp_indices[i];
            }
        }
        // if this was an improvement, move the endpoint and continue search from there
        if (improved)
        {
            if (do_b == 0)
                new_endpts.A[ch] += beststep;
            else
                new_endpts.B[ch] += beststep;
        }
    }
    return min_err;
}
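// with prec = 7 the step schedule is 64, 32, 16, 8, 4, 2, 1: at each scale the endpoint moves up or
// down by the step whenever that lowers the mapped error, so the search visits O(log range) scales
// rather than all 128 possible values of the channel.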
// the larger the error the more time it is worth spending on an exhaustive search.
// perturb the endpoints at least -3 to 3.
// if err > 5000, perturb endpoints by 50% of the precision range
// if err > 1000, 25%
// if err > 200,  12.5%
// if err > 40,   6.25%
// these thresholds assume np = 16 -- adjust them as a function of np
// always ensure endpoint ordering is preserved (no need to overlap the scan)
// if the error returned is less than the orig_err passed in, then indices[] will contain valid indices
static float exhaustive(const Vector4 colors[], const float importance[], int np, int ch, const RegionPrec &region_prec, float orig_err, IntEndptsRGBA_2 &opt_endpts, int indices[Tile::TILE_TOTAL])
{
    IntEndptsRGBA_2 temp_endpts;
    float best_err = orig_err;
    int aprec = region_prec.endpt_a_prec[ch];
    int bprec = region_prec.endpt_b_prec[ch];
    int good_indices[Tile::TILE_TOTAL];
    int temp_indices[Tile::TILE_TOTAL];

    for (int i = 0; i < np; ++i)
        indices[i] = -1;

    float thr_scale = (float)np / (float)Tile::TILE_TOTAL;

    if (orig_err == 0) return orig_err;

    int adelta = 0, bdelta = 0;
    if      (orig_err > 5000.0*thr_scale) { adelta = (1 << aprec)/2;  bdelta = (1 << bprec)/2;  }
    else if (orig_err > 1000.0*thr_scale) { adelta = (1 << aprec)/4;  bdelta = (1 << bprec)/4;  }
    else if (orig_err >  200.0*thr_scale) { adelta = (1 << aprec)/8;  bdelta = (1 << bprec)/8;  }
    else if (orig_err >   40.0*thr_scale) { adelta = (1 << aprec)/16; bdelta = (1 << bprec)/16; }
    adelta = max(adelta, 3);
    bdelta = max(bdelta, 3);

#ifdef DISABLE_EXHAUSTIVE
    adelta = bdelta = 3;
#endif

    temp_endpts = opt_endpts;

    // ok figure out the range of A and B
    int alow  = max(0, opt_endpts.A[ch] - adelta);
    int ahigh = min((1<<aprec)-1, opt_endpts.A[ch] + adelta);
    int blow  = max(0, opt_endpts.B[ch] - bdelta);
    int bhigh = min((1<<bprec)-1, opt_endpts.B[ch] + bdelta);

    // now there's no need to swap the ordering of A and B
    bool a_le_b = opt_endpts.A[ch] <= opt_endpts.B[ch];

    int amin = opt_endpts.A[ch], bmin = opt_endpts.B[ch];

    if (a_le_b)
    {
        // keep a <= b
        for (int a = alow; a <= ahigh; ++a)
        for (int b = max(a, blow); b <= bhigh; ++b)
        {
            temp_endpts.A[ch] = a;
            temp_endpts.B[ch] = b;

            float err = map_colors(colors, importance, np, temp_endpts, region_prec, best_err, temp_indices);
            if (err < best_err)
            {
                amin = a;
                bmin = b;
                best_err = err;
                for (int i = 0; i < np; ++i)
                    good_indices[i] = temp_indices[i];
            }
        }
    }
    else
    {
        // keep b <= a
        for (int b = blow; b <= bhigh; ++b)
        for (int a = max(b, alow); a <= ahigh; ++a)
        {
            temp_endpts.A[ch] = a;
            temp_endpts.B[ch] = b;

            float err = map_colors(colors, importance, np, temp_endpts, region_prec, best_err, temp_indices);
            if (err < best_err)
            {
                amin = a;
                bmin = b;
                best_err = err;
                for (int i = 0; i < np; ++i)
                    good_indices[i] = temp_indices[i];
            }
        }
    }

    if (best_err < orig_err)
    {
        opt_endpts.A[ch] = amin;
        opt_endpts.B[ch] = bmin;
        orig_err = best_err;
        // if we actually improved, update the indices
        for (int i = 0; i < np; ++i)
            indices[i] = good_indices[i];
    }
    return best_err;
}
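// window sizing example: with 7-bit endpoints and a large error (> 5000, scaled by np/16), adelta and
// bdelta become (1<<7)/2 = 64, so up to a 129x129 (a,b) neighborhood is scanned; a small error shrinks
// the window to the +/-3 minimum, keeping the exhaustive pass cheap when we are already close.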
static float optimize_one(const Vector4 colors[], const float importance[], int np, float orig_err, const IntEndptsRGBA_2 &orig_endpts, const RegionPrec &region_prec, IntEndptsRGBA_2 &opt_endpts)
{
    float opt_err = orig_err;

    opt_endpts = orig_endpts;

    /*
        err0 = perturb(rgb0, delta0)
        err1 = perturb(rgb1, delta1)
        if (err0 < err1)
            if (err0 >= initial_error) break
            rgb0 += delta0
            next = 1
        else
            if (err1 >= initial_error) break
            rgb1 += delta1
            next = 0
        initial_err = map()
        for (;;)
            err = perturb(next ? rgb1 : rgb0, delta)
            if (err >= initial_err) break
            next ? rgb1 : rgb0 += delta
            initial_err = err
    */

    IntEndptsRGBA_2 new_a, new_b;
    IntEndptsRGBA_2 new_endpt;
    int do_b;
    int orig_indices[Tile::TILE_TOTAL];
    int new_indices[Tile::TILE_TOTAL];
    int temp_indices0[Tile::TILE_TOTAL];
    int temp_indices1[Tile::TILE_TOTAL];

    // now optimize each channel separately
    // for the first error improvement, we save the indices. then, for any later improvement, we compare the indices
    // if they differ, we restart the loop (which then falls back to looking for a first improvement.)
    for (int ch = 0; ch < NCHANNELS_RGBA; ++ch)
    {
        // figure out which endpoint when perturbed gives the most improvement and start there
        // if we just alternate, we can easily end up in a local minimum
        float err0 = perturb_one(colors, importance, np, ch, region_prec, opt_endpts, new_a, opt_err, 0, temp_indices0); // perturb endpt A
        float err1 = perturb_one(colors, importance, np, ch, region_prec, opt_endpts, new_b, opt_err, 1, temp_indices1); // perturb endpt B

        if (err0 < err1)
        {
            if (err0 >= opt_err)
                continue;

            for (int i = 0; i < np; ++i)
            {
                new_indices[i] = orig_indices[i] = temp_indices0[i];
                nvAssert(orig_indices[i] != -1);
            }

            opt_endpts.A[ch] = new_a.A[ch];
            opt_err = err0;
            do_b = 1; // do B next
        }
        else
        {
            if (err1 >= opt_err)
                continue;

            for (int i = 0; i < np; ++i)
            {
                new_indices[i] = orig_indices[i] = temp_indices1[i];
                nvAssert(orig_indices[i] != -1);
            }

            opt_endpts.B[ch] = new_b.B[ch];
            opt_err = err1;
            do_b = 0; // do A next
        }

        // now alternate endpoints and keep trying until there is no improvement
        for (;;)
        {
            float err = perturb_one(colors, importance, np, ch, region_prec, opt_endpts, new_endpt, opt_err, do_b, temp_indices0);
            if (err >= opt_err)
                break;

            for (int i = 0; i < np; ++i)
            {
                new_indices[i] = temp_indices0[i];
                nvAssert(new_indices[i] != -1);
            }

            if (do_b == 0)
                opt_endpts.A[ch] = new_endpt.A[ch];
            else
                opt_endpts.B[ch] = new_endpt.B[ch];
            opt_err = err;
            do_b = 1 - do_b; // now move the other endpoint
        }

        // see if the indices have changed
        int i;
        for (i = 0; i < np; ++i)
            if (orig_indices[i] != new_indices[i])
                break;

        if (i < np)
            ch = -1; // start over
    }

    // finally, do a small exhaustive search around what we think is the global minimum to be sure
    // note this is independent of the above search, so we don't care about the indices from the above;
    // if they differ, so what? we've already restarted at ch=0 whenever they changed
    bool first = true;
    for (int ch = 0; ch < NCHANNELS_RGBA; ++ch)
    {
        float new_err = exhaustive(colors, importance, np, ch, region_prec, opt_err, opt_endpts, temp_indices0);

        if (new_err < opt_err)
        {
            opt_err = new_err;

            if (first)
            {
                for (int i = 0; i < np; ++i)
                {
                    orig_indices[i] = temp_indices0[i];
                    nvAssert(orig_indices[i] != -1);
                }
                first = false;
            }
            else
            {
                // see if the indices have changed
                int i;
                for (i = 0; i < np; ++i)
                    if (orig_indices[i] != temp_indices0[i])
                        break;

                if (i < np)
                {
                    ch = -1; // start over
                    first = true;
                }
            }
        }
    }
    return opt_err;
}
static void optimize_endpts(const Tile &tile, int shapeindex, const float orig_err[NREGIONS],
                            IntEndptsRGBA_2 orig_endpts[NREGIONS], const PatternPrec &pattern_prec, float opt_err[NREGIONS], IntEndptsRGBA_2 opt_endpts[NREGIONS])
{
    Vector4 pixels[Tile::TILE_TOTAL];
    float importance[Tile::TILE_TOTAL];
    IntEndptsRGBA_2 temp_in, temp_out;
    int temp_indices[Tile::TILE_TOTAL];

    for (int region = 0; region < NREGIONS; ++region)
    {
        // collect the pixels in the region
        int np = 0;

        for (int y = 0; y < tile.size_y; y++) {
            for (int x = 0; x < tile.size_x; x++) {
                if (REGION(x, y, shapeindex) == region) {
                    pixels[np] = tile.data[y][x];
                    importance[np] = tile.importance_map[y][x];
                    np++;
                }
            }
        }

        opt_endpts[region] = temp_in = orig_endpts[region];
        opt_err[region] = orig_err[region];

        float best_err = orig_err[region];

        // try all lsb modes as we search for better endpoints
        for (int lsbmode = 0; lsbmode < NLSBMODES; ++lsbmode)
        {
            temp_in.a_lsb = lsbmode & 1;
            temp_in.b_lsb = (lsbmode >> 1) & 1;

            // make sure we have a valid error for temp_in
            // we use FLT_MAX here because we want an accurate temp_in_err, no shortcuts
            // (map_colors will compute a mapping but will stop if the error exceeds the value passed in the FLT_MAX position)
            float temp_in_err = map_colors(pixels, importance, np, temp_in, pattern_prec.region_precs[region], FLT_MAX, temp_indices);

            // now try to optimize these endpoints
            float temp_out_err = optimize_one(pixels, importance, np, temp_in_err, temp_in, pattern_prec.region_precs[region], temp_out);

            // if we find an improvement, update the best so far and correct the output endpoints and errors
            if (temp_out_err < best_err)
            {
                best_err = temp_out_err;
                opt_err[region] = temp_out_err;
                opt_endpts[region] = temp_out;
            }
        }
    }
}
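// NLSBMODES = 4 because the two shared lsbs (a_lsb, b_lsb) enumerate the combinations 00, 01, 10, 11;
// each lsb shifts every channel of its reconstructed endpoint by at most 1, so all four seeds are
// worth trying before the per-channel optimizer runs.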
/* optimization algorithm
    for each pattern
        convert endpoints using pattern precision
        assign indices and get initial error
        compress indices (and possibly reorder endpoints)
        transform endpoints
        if transformed endpoints fit pattern
            get original endpoints back
            optimize endpoints, get new endpoints, new indices, and new error // new error will almost always be better
            compress new indices
            transform new endpoints
            if new endpoints fit pattern AND if error is improved
                emit compressed block with new data
            else
                emit compressed block with original data // to try to preserve maximum endpoint precision

    simplify the above given that there is no transform now and that endpoints will always fit
*/
static float refine(const Tile &tile, int shapeindex_best, const FltEndpts endpts[NREGIONS], char *block)
{
    float orig_err[NREGIONS], opt_err[NREGIONS], orig_toterr, opt_toterr, expected_opt_err[NREGIONS];
    IntEndptsRGBA_2 orig_endpts[NREGIONS], opt_endpts[NREGIONS];
    int orig_indices[Tile::TILE_H][Tile::TILE_W], opt_indices[Tile::TILE_H][Tile::TILE_W];

    for (int sp = 0; sp < NPATTERNS; ++sp)
    {
        quantize_endpts(endpts, pattern_precs[sp], orig_endpts);
        assign_indices(tile, shapeindex_best, orig_endpts, pattern_precs[sp], orig_indices, orig_err);
        swap_indices(orig_endpts, orig_indices, shapeindex_best);

        optimize_endpts(tile, shapeindex_best, orig_err, orig_endpts, pattern_precs[sp], expected_opt_err, opt_endpts);

        assign_indices(tile, shapeindex_best, opt_endpts, pattern_precs[sp], opt_indices, opt_err);
        // (nreed) Commented out asserts because they go off all the time...not sure why
        //for (int i=0; i<NREGIONS; ++i)
        //    nvAssert(expected_opt_err[i] == opt_err[i]);
        swap_indices(opt_endpts, opt_indices, shapeindex_best);

        orig_toterr = opt_toterr = 0;
        for (int i = 0; i < NREGIONS; ++i) { orig_toterr += orig_err[i]; opt_toterr += opt_err[i]; }
        //nvAssert(opt_toterr <= orig_toterr);

        if (opt_toterr < orig_toterr)
        {
            emit_block(opt_endpts, shapeindex_best, patterns[sp], opt_indices, block);
            return opt_toterr;
        }
        else
        {
            emit_block(orig_endpts, shapeindex_best, patterns[sp], orig_indices, block);
            return orig_toterr;
        }
    }
    nvAssert(false); //throw "No candidate found, should never happen (mode avpcl 6).";
    return FLT_MAX;
}
static void clamp(Vector4 &v)
{
    if (v.x < 0.0f) v.x = 0.0f;
    if (v.x > 255.0f) v.x = 255.0f;
    if (v.y < 0.0f) v.y = 0.0f;
    if (v.y > 255.0f) v.y = 255.0f;
    if (v.z < 0.0f) v.z = 0.0f;
    if (v.z > 255.0f) v.z = 255.0f;
    if (v.w < 0.0f) v.w = 0.0f;
    if (v.w > 255.0f) v.w = 255.0f;
}
static void generate_palette_unquantized(const FltEndpts endpts[NREGIONS], Vector4 palette[NREGIONS][NINDICES])
{
    for (int region = 0; region < NREGIONS; ++region)
        for (int i = 0; i < NINDICES; ++i)
            palette[region][i] = Utils::lerp(endpts[region].A, endpts[region].B, i, 0, DENOM);
}
// generate a palette from unquantized endpoints, then pick the best palette color for all pixels
// in each region, returning the total error for all regions combined
static float map_colors(const Tile &tile, int shapeindex, const FltEndpts endpts[NREGIONS])
{
    // build list of possibles
    Vector4 palette[NREGIONS][NINDICES];

    generate_palette_unquantized(endpts, palette);

    float toterr = 0;

    for (int y = 0; y < tile.size_y; y++)
        for (int x = 0; x < tile.size_x; x++)
        {
            int region = REGION(x,y,shapeindex);
            float err, besterr;

            besterr = Utils::metric4(tile.data[y][x], palette[region][0]);

            for (int i = 1; i < NINDICES && besterr > 0; ++i)
            {
                err = Utils::metric4(tile.data[y][x], palette[region][i]);

                if (err > besterr) // error increased, so we're done searching. this works for most norms.
                    break;
                if (err < besterr)
                    besterr = err;
            }
            toterr += besterr;
        }
    return toterr;
}
static float rough(const Tile &tile, int shapeindex, FltEndpts endpts[NREGIONS])
{
    for (int region = 0; region < NREGIONS; ++region)
    {
        int np = 0;
        Vector4 colors[Tile::TILE_TOTAL];
        Vector4 mean(0,0,0,0);

        for (int y = 0; y < tile.size_y; y++)
            for (int x = 0; x < tile.size_x; x++)
                if (REGION(x,y,shapeindex) == region)
                {
                    colors[np] = tile.data[y][x];
                    mean += tile.data[y][x];
                    ++np;
                }

        // handle simple cases
        if (np == 0)
        {
            Vector4 zero(0,0,0,255.0f);
            endpts[region].A = zero;
            endpts[region].B = zero;
            continue;
        }
        else if (np == 1)
        {
            endpts[region].A = colors[0];
            endpts[region].B = colors[0];
            continue;
        }
        else if (np == 2)
        {
            endpts[region].A = colors[0];
            endpts[region].B = colors[1];
            continue;
        }

        mean /= float(np);

        Vector4 direction = Fit::computePrincipalComponent_EigenSolver(np, colors);

        // project each pixel value along the principal direction
        float minp = FLT_MAX, maxp = -FLT_MAX;
        for (int i = 0; i < np; i++)
        {
            float dp = dot(colors[i]-mean, direction);
            if (dp < minp) minp = dp;
            if (dp > maxp) maxp = dp;
        }

        // choose as endpoints 2 points along the principal direction that span the projections of all of the pixel values
        endpts[region].A = mean + minp*direction;
        endpts[region].B = mean + maxp*direction;

        // clamp endpoints
        // the argument for clamping is that the actual endpoints need to be clamped and thus we need to choose the best
        // shape based on endpoints being clamped
        clamp(endpts[region].A);
        clamp(endpts[region].B);
    }
    return map_colors(tile, shapeindex, endpts);
}
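// in effect this is a quick 1-D fit: the principal direction is the axis of greatest color variance,
// and projecting every pixel onto it yields scalar coordinates whose min/max become the initial
// endpoint guesses that refine() later quantizes and optimizes.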
// swap the ith and jth entries of both lists in tandem (used by the bubble sort below)
static void swap(float *list1, int *list2, int i, int j)
{
    float t = list1[i]; list1[i] = list1[j]; list1[j] = t;
    int t1 = list2[i]; list2[i] = list2[j]; list2[j] = t1;
}
float AVPCL::compress_mode6(const Tile &t, char *block)
{
    // number of rough cases to look at. reasonable values of this are 1, NSHAPES/4, and NSHAPES
    // NSHAPES/4 gets nearly all the cases; you can increase that a bit (say by 3 or 4) if you really want to squeeze the last bit out
    const int NITEMS = 1;

    // pick the best NITEMS shapes and refine these.
    struct {
        FltEndpts endpts[NREGIONS];
    } all[NSHAPES];
    float roughmse[NSHAPES];
    int index[NSHAPES];
    char tempblock[AVPCL::BLOCKSIZE];
    float msebest = FLT_MAX;

    for (int i = 0; i < NSHAPES; ++i)
    {
        roughmse[i] = rough(t, i, &all[i].endpts[0]);
        index[i] = i;
    }

    // bubble sort -- only need to bubble up the first NITEMS items
    for (int i = 0; i < NITEMS; ++i)
        for (int j = i+1; j < NSHAPES; ++j)
            if (roughmse[i] > roughmse[j])
                swap(roughmse, index, i, j);

    for (int i = 0; i < NITEMS && msebest > 0; ++i)
    {
        int shape = index[i];
        float mse = refine(t, shape, &all[shape].endpts[0], tempblock);
        if (mse < msebest)
        {
            memcpy(block, tempblock, sizeof(tempblock));
            msebest = mse;
        }
    }
    return msebest;
}
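// usage sketch (hypothetical caller -- the Tile setup below is an assumption, not part of this file;
// compress_mode6 returns the total error of the encoding it wrote into the 128-bit block):
//
//   char block[AVPCL::BLOCKSIZE];
//   float mse = AVPCL::compress_mode6(tile, block);   // encode a filled-in Tile
//   AVPCL::decompress_mode6(block, tile_out);         // decode into another Tile to verify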