ProcessRGB.cpp

#include <algorithm> // std::min, std::max in convert6/convert7
#include <array>
#include <string.h>

#include "Math.hpp"
#include "ProcessCommon.hpp"
#include "ProcessRGB.hpp"
#include "Tables.hpp"
#include "Types.hpp"
#include "Vector.hpp"

#include <bx/endian.h>

#ifdef __SSE4_1__
#  ifdef _MSC_VER
#    include <intrin.h>
#    include <Windows.h>
#  else
#    include <x86intrin.h>
#  endif
#endif
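
// The code below compresses 4x4 blocks of 32-bit pixels (read in B, G, R byte
// order) into 64-bit ETC1/ETC2 blocks. The bx/endian.h include suggests this is
// the etcpak-derived encoder shipped with bimg/bgfx.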
namespace
{

typedef std::array<uint16, 4> v4i; // r, g, b, padding
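
// Sums each 2x2 quadrant of the block, then averages the bottom/top and
// right/left halves (the four half-blocks ETC1 can use), rounding to nearest:
// a[0] = bottom, a[1] = top, a[2] = right, a[3] = left, each stored as R, G, B.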
void Average( const uint8* data, v4i* a )
{
#ifdef __SSE4_1__
    __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
    __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
    __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
    __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);

    __m128i d0l = _mm_unpacklo_epi8(d0, _mm_setzero_si128());
    __m128i d0h = _mm_unpackhi_epi8(d0, _mm_setzero_si128());
    __m128i d1l = _mm_unpacklo_epi8(d1, _mm_setzero_si128());
    __m128i d1h = _mm_unpackhi_epi8(d1, _mm_setzero_si128());
    __m128i d2l = _mm_unpacklo_epi8(d2, _mm_setzero_si128());
    __m128i d2h = _mm_unpackhi_epi8(d2, _mm_setzero_si128());
    __m128i d3l = _mm_unpacklo_epi8(d3, _mm_setzero_si128());
    __m128i d3h = _mm_unpackhi_epi8(d3, _mm_setzero_si128());

    __m128i sum0 = _mm_add_epi16(d0l, d1l);
    __m128i sum1 = _mm_add_epi16(d0h, d1h);
    __m128i sum2 = _mm_add_epi16(d2l, d3l);
    __m128i sum3 = _mm_add_epi16(d2h, d3h);

    __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
    __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
    __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
    __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
    __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
    __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
    __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
    __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());

    __m128i b0 = _mm_add_epi32(sum0l, sum0h);
    __m128i b1 = _mm_add_epi32(sum1l, sum1h);
    __m128i b2 = _mm_add_epi32(sum2l, sum2h);
    __m128i b3 = _mm_add_epi32(sum3l, sum3h);

    __m128i a0 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b2, b3), _mm_set1_epi32(4)), 3);
    __m128i a1 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b1), _mm_set1_epi32(4)), 3);
    __m128i a2 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b1, b3), _mm_set1_epi32(4)), 3);
    __m128i a3 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b2), _mm_set1_epi32(4)), 3);

    _mm_storeu_si128((__m128i*)&a[0], _mm_packus_epi32(_mm_shuffle_epi32(a0, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a1, _MM_SHUFFLE(3, 0, 1, 2))));
    _mm_storeu_si128((__m128i*)&a[2], _mm_packus_epi32(_mm_shuffle_epi32(a2, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a3, _MM_SHUFFLE(3, 0, 1, 2))));
#else
    uint32 r[4];
    uint32 g[4];
    uint32 b[4];

    memset(r, 0, sizeof(r));
    memset(g, 0, sizeof(g));
    memset(b, 0, sizeof(b));

    for( int j=0; j<4; j++ )
    {
        for( int i=0; i<4; i++ )
        {
            int index = (j & 2) + (i >> 1);
            b[index] += *data++;
            g[index] += *data++;
            r[index] += *data++;
            data++;
        }
    }

    a[0][0] = uint16( (r[2] + r[3] + 4) / 8 );
    a[0][1] = uint16( (g[2] + g[3] + 4) / 8 );
    a[0][2] = uint16( (b[2] + b[3] + 4) / 8 );
    a[0][3] = 0;
    a[1][0] = uint16( (r[0] + r[1] + 4) / 8 );
    a[1][1] = uint16( (g[0] + g[1] + 4) / 8 );
    a[1][2] = uint16( (b[0] + b[1] + 4) / 8 );
    a[1][3] = 0;
    a[2][0] = uint16( (r[1] + r[3] + 4) / 8 );
    a[2][1] = uint16( (g[1] + g[3] + 4) / 8 );
    a[2][2] = uint16( (b[1] + b[3] + 4) / 8 );
    a[2][3] = 0;
    a[3][0] = uint16( (r[0] + r[2] + 4) / 8 );
    a[3][1] = uint16( (g[0] + g[2] + 4) / 8 );
    a[3][2] = uint16( (b[0] + b[2] + 4) / 8 );
    a[3][3] = 0;
#endif
}
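
// Accumulates the per-channel color sums of the same four half-blocks as
// Average(), without the rounding; CalcError() combines these sums with a
// candidate average color.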
void CalcErrorBlock( const uint8* data, uint err[4][4] )
{
#ifdef __SSE4_1__
    __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
    __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
    __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
    __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);

    __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));

    __m128i d0l = _mm_unpacklo_epi8(dm0, _mm_setzero_si128());
    __m128i d0h = _mm_unpackhi_epi8(dm0, _mm_setzero_si128());
    __m128i d1l = _mm_unpacklo_epi8(dm1, _mm_setzero_si128());
    __m128i d1h = _mm_unpackhi_epi8(dm1, _mm_setzero_si128());
    __m128i d2l = _mm_unpacklo_epi8(dm2, _mm_setzero_si128());
    __m128i d2h = _mm_unpackhi_epi8(dm2, _mm_setzero_si128());
    __m128i d3l = _mm_unpacklo_epi8(dm3, _mm_setzero_si128());
    __m128i d3h = _mm_unpackhi_epi8(dm3, _mm_setzero_si128());

    __m128i sum0 = _mm_add_epi16(d0l, d1l);
    __m128i sum1 = _mm_add_epi16(d0h, d1h);
    __m128i sum2 = _mm_add_epi16(d2l, d3l);
    __m128i sum3 = _mm_add_epi16(d2h, d3h);

    __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
    __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
    __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
    __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
    __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
    __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
    __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
    __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());

    __m128i b0 = _mm_add_epi32(sum0l, sum0h);
    __m128i b1 = _mm_add_epi32(sum1l, sum1h);
    __m128i b2 = _mm_add_epi32(sum2l, sum2h);
    __m128i b3 = _mm_add_epi32(sum3l, sum3h);

    __m128i a0 = _mm_add_epi32(b2, b3);
    __m128i a1 = _mm_add_epi32(b0, b1);
    __m128i a2 = _mm_add_epi32(b1, b3);
    __m128i a3 = _mm_add_epi32(b0, b2);

    _mm_storeu_si128((__m128i*)&err[0], a0);
    _mm_storeu_si128((__m128i*)&err[1], a1);
    _mm_storeu_si128((__m128i*)&err[2], a2);
    _mm_storeu_si128((__m128i*)&err[3], a3);
#else
    uint terr[4][4];

    memset(terr, 0, 16 * sizeof(uint));

    for( int j=0; j<4; j++ )
    {
        for( int i=0; i<4; i++ )
        {
            int index = (j & 2) + (i >> 1);
            uint d = *data++;
            terr[index][0] += d;
            d = *data++;
            terr[index][1] += d;
            d = *data++;
            terr[index][2] += d;
            data++;
        }
    }

    for( int i=0; i<3; i++ )
    {
        err[0][i] = terr[2][i] + terr[3][i];
        err[1][i] = terr[0][i] + terr[1][i];
        err[2][i] = terr[1][i] + terr[3][i];
        err[3][i] = terr[0][i] + terr[2][i];
    }
    for( int i=0; i<4; i++ )
    {
        err[i][3] = 0;
    }
#endif
}
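
// Scores how well a candidate average color fits one half-block. Per channel,
// sum((p - avg)^2) over the 8 pixels = sum(p^2) - 2*avg*sum(p) + 8*avg^2; the
// sum(p^2) term is identical for every candidate, so it is dropped and a large
// constant bias keeps the result non-negative. block[] holds the per-channel
// sums (B, G, R order, from CalcErrorBlock), average the candidate color
// (R, G, B order).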
uint CalcError( const uint block[4], const v4i& average )
{
    uint err = 0x3FFFFFFF; // Big value to prevent negative values, but small enough to prevent overflow
    err -= block[0] * 2 * average[2];
    err -= block[1] * 2 * average[1];
    err -= block[2] * 2 * average[0];
    err += 8 * ( sq( average[0] ) + sq( average[1] ) + sq( average[2] ) );
    return err;
}
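
// Quantizes the half-block averages to the two ETC1 base-color encodings and
// expands them back to 8 bits for error evaluation: a[4..7] become the RGB555
// colors used by differential mode (the delta between the two colors of a pair
// is clamped to the representable -4..3 range), while a[0..3] are replaced in
// place by their RGB444 individual-mode quantization.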
void ProcessAverages( v4i* a )
{
#ifdef __SSE4_1__
    for( int i=0; i<2; i++ )
    {
        __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());

        __m128i t = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(31)), _mm_set1_epi16(128));
        __m128i c = _mm_srli_epi16(_mm_add_epi16(t, _mm_srli_epi16(t, 8)), 8);

        __m128i c1 = _mm_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
        __m128i diff = _mm_sub_epi16(c, c1);
        diff = _mm_max_epi16(diff, _mm_set1_epi16(-4));
        diff = _mm_min_epi16(diff, _mm_set1_epi16(3));

        __m128i co = _mm_add_epi16(c1, diff);

        c = _mm_blend_epi16(co, c, 0xF0);

        __m128i a0 = _mm_or_si128(_mm_slli_epi16(c, 3), _mm_srli_epi16(c, 2));

        _mm_storeu_si128((__m128i*)a[4+i*2].data(), a0);
    }

    for( int i=0; i<2; i++ )
    {
        __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());

        __m128i t0 = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(15)), _mm_set1_epi16(128));
        __m128i t1 = _mm_srli_epi16(_mm_add_epi16(t0, _mm_srli_epi16(t0, 8)), 8);

        __m128i t2 = _mm_or_si128(t1, _mm_slli_epi16(t1, 4));

        _mm_storeu_si128((__m128i*)a[i*2].data(), t2);
    }
#else
    for( int i=0; i<2; i++ )
    {
        for( int j=0; j<3; j++ )
        {
            int32 c1 = mul8bit( a[i*2+1][j], 31 );
            int32 c2 = mul8bit( a[i*2][j], 31 );

            int32 diff = c2 - c1;
            if( diff > 3 ) diff = 3;
            else if( diff < -4 ) diff = -4;

            int32 co = c1 + diff;

            a[5+i*2][j] = ( c1 << 3 ) | ( c1 >> 2 );
            a[4+i*2][j] = ( co << 3 ) | ( co >> 2 );
        }
    }

    for( int i=0; i<4; i++ )
    {
        a[i][0] = g_avg2[mul8bit( a[i][0], 15 )];
        a[i][1] = g_avg2[mul8bit( a[i][1], 15 )];
        a[i][2] = g_avg2[mul8bit( a[i][2], 15 )];
    }
#endif
}
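
// Packs the chosen pair of base colors into the first 32 bits of the block
// word (before the byte swap). idx selects the winning mode/split combination:
// ( idx << 24 ) writes the flip/diff bits, bit 1 of idx picks differential
// mode (one RGB555 color plus a 3-bit two's-complement delta per channel)
// versus individual mode (two RGB444 colors).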
void EncodeAverages( uint64& _d, const v4i* a, size_t idx )
{
    uint64 d = _d;
    d |= ( idx << 24 );
    size_t base = idx << 1;

    if( ( idx & 0x2 ) == 0 )
    {
        for( int i=0; i<3; i++ )
        {
            d |= uint64( a[base+0][i] >> 4 ) << ( i*8 );
            d |= uint64( a[base+1][i] >> 4 ) << ( i*8 + 4 );
        }
    }
    else
    {
        for( int i=0; i<3; i++ )
        {
            d |= uint64( a[base+1][i] & 0xF8 ) << ( i*8 );
            int32 c = ( ( a[base+0][i] & 0xF8 ) - ( a[base+1][i] & 0xF8 ) ) >> 3;
            c &= ~0xFFFFFFF8;
            d |= ((uint64)c) << ( i*8 );
        }
    }
    _d = d;
}
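
// Fast path for uniform blocks: if all 16 pixels are identical, return a
// ready-made block that encodes the color directly in differential mode (zero
// delta, all selectors zero); otherwise return 0 so the full search runs.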
uint64 CheckSolid( const uint8* src )
{
#ifdef __SSE4_1__
    __m128i d0 = _mm_loadu_si128(((__m128i*)src) + 0);
    __m128i d1 = _mm_loadu_si128(((__m128i*)src) + 1);
    __m128i d2 = _mm_loadu_si128(((__m128i*)src) + 2);
    __m128i d3 = _mm_loadu_si128(((__m128i*)src) + 3);

    __m128i c = _mm_shuffle_epi32(d0, _MM_SHUFFLE(0, 0, 0, 0));

    __m128i c0 = _mm_cmpeq_epi8(d0, c);
    __m128i c1 = _mm_cmpeq_epi8(d1, c);
    __m128i c2 = _mm_cmpeq_epi8(d2, c);
    __m128i c3 = _mm_cmpeq_epi8(d3, c);

    __m128i m0 = _mm_and_si128(c0, c1);
    __m128i m1 = _mm_and_si128(c2, c3);
    __m128i m = _mm_and_si128(m0, m1);

    if( !_mm_testc_si128(m, _mm_set1_epi32(-1)) )
    {
        return 0;
    }
#else
    const uint8* ptr = src + 4;
    for( int i=1; i<16; i++ )
    {
        if( memcmp( src, ptr, 4 ) != 0 )
        {
            return 0;
        }
        ptr += 4;
    }
#endif
    return 0x02000000 |
        ( uint( src[0] & 0xF8 ) << 16 ) |
        ( uint( src[1] & 0xF8 ) << 8 ) |
        ( uint( src[2] & 0xF8 ) );
}
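
// Computes and quantizes the half-block averages, then scores the four
// mode/split candidates: err[0..1] accumulate the individual-mode (RGB444)
// error for the two split orientations, err[2..3] the differential-mode
// (RGB555) error.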
void PrepareAverages( v4i a[8], const uint8* src, uint err[4] )
{
    Average( src, a );
    ProcessAverages( a );

    uint errblock[4][4];
    CalcErrorBlock( src, errblock );

    for( int i=0; i<4; i++ )
    {
        err[i/2] += CalcError( errblock[i], a[i] );
        err[2+i/2] += CalcError( errblock[i], a[i+4] );
    }
}
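
// For each pixel, picks the best selector against each of the 8 ETC1
// modulation tables of its half-block. The color difference is collapsed to a
// single value with the classic integer luma weights (77/151/28), the
// per-table squared error is accumulated in terr, and the chosen selectors are
// stored in tsel so EncodeSelectors() can emit the winning table afterwards.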
void FindBestFit( uint64 terr[2][8], uint16 tsel[16][8], v4i a[8], const uint32* id, const uint8* data )
{
    for( size_t i=0; i<16; i++ )
    {
        uint16* sel = tsel[i];
        uint bid = id[i];
        uint64* ter = terr[bid%2];

        uint8 b = *data++;
        uint8 g = *data++;
        uint8 r = *data++;
        data++;

        int dr = a[bid][0] - r;
        int dg = a[bid][1] - g;
        int db = a[bid][2] - b;

#ifdef __SSE4_1__
        // Reference implementation
        __m128i pix = _mm_set1_epi32(dr * 77 + dg * 151 + db * 28);

        // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
        __m128i error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[0]));
        __m128i error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[1]));
        __m128i error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[0]));
        __m128i error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[1]));

        __m128i index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
        __m128i minError0 = _mm_min_epi32(error0, error1);

        __m128i index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
        __m128i minError1 = _mm_min_epi32(error2, error3);

        __m128i minIndex0 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
        __m128i minError = _mm_min_epi32(minError0, minError1);

        // Squaring the minimum error to produce correct values when adding
        __m128i minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
        __m128i squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
        squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
        _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
        __m128i minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
        __m128i squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
        squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
        _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);

        // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
        error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[2]));
        error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[3]));
        error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[2]));
        error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[3]));

        index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
        minError0 = _mm_min_epi32(error0, error1);

        index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
        minError1 = _mm_min_epi32(error2, error3);

        __m128i minIndex1 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
        minError = _mm_min_epi32(minError0, minError1);

        // Squaring the minimum error to produce correct values when adding
        minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
        squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
        squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 2));
        _mm_storeu_si128(((__m128i*)ter) + 2, squareErrorLow);
        minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
        squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
        squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 3));
        _mm_storeu_si128(((__m128i*)ter) + 3, squareErrorHigh);

        __m128i minIndex = _mm_packs_epi32(minIndex0, minIndex1);
        _mm_storeu_si128((__m128i*)sel, minIndex);
#else
        int pix = dr * 77 + dg * 151 + db * 28;

        for( int t=0; t<8; t++ )
        {
            const int64* tab = g_table256[t];
            uint idx = 0;
            uint64 err = sq( tab[0] + pix );
            for( int j=1; j<4; j++ )
            {
                uint64 local = sq( tab[j] + pix );
                if( local < err )
                {
                    err = local;
                    idx = j;
                }
            }
            *sel++ = idx;
            *ter++ += err;
        }
#endif
    }
}
#ifdef __SSE4_1__
// Non-reference implementation, but faster. Produces the same results as the AVX2 version
void FindBestFit( uint32 terr[2][8], uint16 tsel[16][8], v4i a[8], const uint32* id, const uint8* data )
{
    for( size_t i=0; i<16; i++ )
    {
        uint16* sel = tsel[i];
        uint bid = id[i];
        uint32* ter = terr[bid%2];

        uint8 b = *data++;
        uint8 g = *data++;
        uint8 r = *data++;
        data++;

        int dr = a[bid][0] - r;
        int dg = a[bid][1] - g;
        int db = a[bid][2] - b;

        // The scaling values are divided by two and rounded, so that the differences fit in a signed int16.
        // This produces slightly different results, but is significantly faster.
        __m128i pixel = _mm_set1_epi16(dr * 38 + dg * 76 + db * 14);
        __m128i pix = _mm_abs_epi16(pixel);

        // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
        // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
        __m128i error0 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[0]));
        __m128i error1 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[1]));

        __m128i index = _mm_and_si128(_mm_cmplt_epi16(error1, error0), _mm_set1_epi16(1));
        __m128i minError = _mm_min_epi16(error0, error1);

        // Exploit the symmetry of the selector table and use the sign bit.
        // This produces slightly different results, but is needed to produce the same results as the AVX2 implementation.
        __m128i indexBit = _mm_andnot_si128(_mm_srli_epi16(pixel, 15), _mm_set1_epi8(-1));
        __m128i minIndex = _mm_or_si128(index, _mm_add_epi16(indexBit, indexBit));

        // Squaring the minimum error to produce correct values when adding
        __m128i squareErrorLo = _mm_mullo_epi16(minError, minError);
        __m128i squareErrorHi = _mm_mulhi_epi16(minError, minError);

        __m128i squareErrorLow = _mm_unpacklo_epi16(squareErrorLo, squareErrorHi);
        __m128i squareErrorHigh = _mm_unpackhi_epi16(squareErrorLo, squareErrorHi);

        squareErrorLow = _mm_add_epi32(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
        _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
        squareErrorHigh = _mm_add_epi32(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
        _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);

        _mm_storeu_si128((__m128i*)sel, minIndex);
    }
}
#endif
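
// Quantize one planar-mode color channel to 6 or 7 bits. The input comes from
// Planar() at 4x scale (roughly 0..1020) and is clamped to 0..1023; the extra
// (i + ...) >> 7 / >> 8 terms appear to be rounding corrections for how the
// 6/7-bit values are later expanded back to 8 bits.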
uint8_t convert6(float f)
{
    int i = (std::min(std::max(static_cast<int>(f), 0), 1023) - 15) >> 1;
    return (i + 11 - ((i + 11) >> 7) - ((i + 4) >> 7)) >> 3;
}

uint8_t convert7(float f)
{
    int i = (std::min(std::max(static_cast<int>(f), 0), 1023) - 15) >> 1;
    return (i + 9 - ((i + 9) >> 8) - ((i + 6) >> 8)) >> 2;
}
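
// ETC2 planar mode: fits a linear color gradient over the 4x4 block (the fixed
// -255/-85/85/255 weights and the scale constant amount to a closed-form
// least-squares fit), quantizes the three corner colors to R6G7B6, measures the
// luma-weighted squared error of the reconstruction, and returns the packed
// block together with that error so the caller can compare it against the
// ETC1-style modes.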
std::pair<uint64, uint64> Planar(const uint8* src)
{
    int32 r = 0;
    int32 g = 0;
    int32 b = 0;

    for (int i = 0; i < 16; ++i)
    {
        b += src[i * 4 + 0];
        g += src[i * 4 + 1];
        r += src[i * 4 + 2];
    }

    int32 difRyz = 0;
    int32 difGyz = 0;
    int32 difByz = 0;
    int32 difRxz = 0;
    int32 difGxz = 0;
    int32 difBxz = 0;

    const int32 scaling[] = { -255, -85, 85, 255 };

    for (int i = 0; i < 16; ++i)
    {
        int32 difB = (static_cast<int>(src[i * 4 + 0]) << 4) - b;
        int32 difG = (static_cast<int>(src[i * 4 + 1]) << 4) - g;
        int32 difR = (static_cast<int>(src[i * 4 + 2]) << 4) - r;

        difRyz += difR * scaling[i % 4];
        difGyz += difG * scaling[i % 4];
        difByz += difB * scaling[i % 4];

        difRxz += difR * scaling[i / 4];
        difGxz += difG * scaling[i / 4];
        difBxz += difB * scaling[i / 4];
    }

    const float scale = -4.0f / ((255 * 255 * 8.0f + 85 * 85 * 8.0f) * 16.0f);

    float aR = difRxz * scale;
    float aG = difGxz * scale;
    float aB = difBxz * scale;

    float bR = difRyz * scale;
    float bG = difGyz * scale;
    float bB = difByz * scale;

    float dR = r * (4.0f / 16.0f);
    float dG = g * (4.0f / 16.0f);
    float dB = b * (4.0f / 16.0f);

    // calculating the three colors RGBO, RGBH, and RGBV.  RGB = df - af * x - bf * y;
    float cofR = (aR * 255.0f + (bR * 255.0f + dR));
    float cofG = (aG * 255.0f + (bG * 255.0f + dG));
    float cofB = (aB * 255.0f + (bB * 255.0f + dB));
    float chfR = (aR * -425.0f + (bR * 255.0f + dR));
    float chfG = (aG * -425.0f + (bG * 255.0f + dG));
    float chfB = (aB * -425.0f + (bB * 255.0f + dB));
    float cvfR = (aR * 255.0f + (bR * -425.0f + dR));
    float cvfG = (aG * 255.0f + (bG * -425.0f + dG));
    float cvfB = (aB * 255.0f + (bB * -425.0f + dB));

    // convert to r6g7b6
    int32 coR = convert6(cofR);
    int32 coG = convert7(cofG);
    int32 coB = convert6(cofB);
    int32 chR = convert6(chfR);
    int32 chG = convert7(chfG);
    int32 chB = convert6(chfB);
    int32 cvR = convert6(cvfR);
    int32 cvG = convert7(cvfG);
    int32 cvB = convert6(cvfB);

    // Error calculation
    int32 ro0 = coR;
    int32 go0 = coG;
    int32 bo0 = coB;
    int32 ro1 = (ro0 >> 4) | (ro0 << 2);
    int32 go1 = (go0 >> 6) | (go0 << 1);
    int32 bo1 = (bo0 >> 4) | (bo0 << 2);
    int32 ro2 = (ro1 << 2) + 2;
    int32 go2 = (go1 << 2) + 2;
    int32 bo2 = (bo1 << 2) + 2;

    int32 rh0 = chR;
    int32 gh0 = chG;
    int32 bh0 = chB;
    int32 rh1 = (rh0 >> 4) | (rh0 << 2);
    int32 gh1 = (gh0 >> 6) | (gh0 << 1);
    int32 bh1 = (bh0 >> 4) | (bh0 << 2);
    int32 rh2 = rh1 - ro1;
    int32 gh2 = gh1 - go1;
    int32 bh2 = bh1 - bo1;

    int32 rv0 = cvR;
    int32 gv0 = cvG;
    int32 bv0 = cvB;
    int32 rv1 = (rv0 >> 4) | (rv0 << 2);
    int32 gv1 = (gv0 >> 6) | (gv0 << 1);
    int32 bv1 = (bv0 >> 4) | (bv0 << 2);
    int32 rv2 = rv1 - ro1;
    int32 gv2 = gv1 - go1;
    int32 bv2 = bv1 - bo1;

    uint64 error = 0;
    for (int i = 0; i < 16; ++i)
    {
        int32 cR = clampu8((rh2 * (i / 4) + rv2 * (i % 4) + ro2) >> 2);
        int32 cG = clampu8((gh2 * (i / 4) + gv2 * (i % 4) + go2) >> 2);
        int32 cB = clampu8((bh2 * (i / 4) + bv2 * (i % 4) + bo2) >> 2);

        int32 difB = static_cast<int>(src[i * 4 + 0]) - cB;
        int32 difG = static_cast<int>(src[i * 4 + 1]) - cG;
        int32 difR = static_cast<int>(src[i * 4 + 2]) - cR;

        int32 dif = difR * 38 + difG * 76 + difB * 14;

        error += dif * dif;
    }

    /**/
    uint32 rgbv = cvB | (cvG << 6) | (cvR << 13);
    uint32 rgbh = chB | (chG << 6) | (chR << 13);
    uint32 hi = rgbv | ((rgbh & 0x1FFF) << 19);
    uint32 lo = (chR & 0x1) | 0x2 | ((chR << 1) & 0x7C);
    lo |= ((coB & 0x07) << 7) | ((coB & 0x18) << 8) | ((coB & 0x20) << 11);
    lo |= ((coG & 0x3F) << 17) | ((coG & 0x40) << 18);
    lo |= coR << 25;

    const int32 idx = (coR & 0x20) | ((coG & 0x20) >> 1) | ((coB & 0x1E) >> 1);

    lo |= g_flags[idx];

    uint64 result = static_cast<uint32>(bx::endianSwap(lo));
    result |= static_cast<uint64>(static_cast<uint32>(bx::endianSwap(hi))) << 32;

    return std::make_pair(result, error);
}
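
// Picks the lowest-error modulation table for each half-block and packs the
// table indices plus the per-pixel 2-bit selectors into the block. If the
// combined error is not better than the supplied candidate (the planar block),
// that candidate is returned unchanged instead.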
template<class T, class S>
uint64 EncodeSelectors( uint64 d, const T terr[2][8], const S tsel[16][8], const uint32* id, const uint64 value, const uint64 error )
{
    size_t tidx[2];
    tidx[0] = GetLeastError( terr[0], 8 );
    tidx[1] = GetLeastError( terr[1], 8 );

    if( ( terr[0][tidx[0]] + terr[1][tidx[1]] ) >= error )
    {
        return value;
    }

    d |= tidx[0] << 26;
    d |= tidx[1] << 29;
    for( int i=0; i<16; i++ )
    {
        uint64 t = tsel[i][tidx[id[i]%2]];
        d |= ( t & 0x1 ) << ( i + 32 );
        d |= ( t & 0x2 ) << ( i + 47 );
    }

    return FixByteOrder( d );
}
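
// ProcessRGB() below calls EncodeSelectors() with four arguments (no planar
// candidate to compare against), but that overload is not present in this
// listing. The version here is a minimal reconstruction inferred from the call
// site: the same selector packing as above, without the early-out, and without
// FixByteOrder() since the caller applies it itself.
template<class T, class S>
uint64 EncodeSelectors( uint64 d, const T terr[2][8], const S tsel[16][8], const uint32* id )
{
    size_t tidx[2];
    tidx[0] = GetLeastError( terr[0], 8 );
    tidx[1] = GetLeastError( terr[1], 8 );

    d |= tidx[0] << 26;
    d |= tidx[1] << 29;
    for( int i=0; i<16; i++ )
    {
        uint64 t = tsel[i][tidx[id[i]%2]];
        d |= ( t & 0x1 ) << ( i + 32 );
        d |= ( t & 0x2 ) << ( i + 47 );
    }

    return d;
}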
}
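
// Compress one 4x4 block of 32-bit pixels (B, G, R byte order) into a 64-bit
// ETC1 block: solid-color fast path, base-color/mode selection, then the
// per-pixel selector search.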
uint64 ProcessRGB( const uint8* src )
{
    uint64 d = CheckSolid( src );
    if( d != 0 ) return d;

    v4i a[8];
    uint err[4] = {};
    PrepareAverages( a, src, err );
    size_t idx = GetLeastError( err, 4 );
    EncodeAverages( d, a, idx );

#if defined __SSE4_1__ && !defined REFERENCE_IMPLEMENTATION
    uint32 terr[2][8] = {};
#else
    uint64 terr[2][8] = {};
#endif
    uint16 tsel[16][8];
    const uint32* id = g_id[idx];
    FindBestFit( terr, tsel, a, id, src );

    return FixByteOrder( EncodeSelectors( d, terr, tsel, id ) );
}
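
// ETC2 variant: additionally evaluates the planar mode and keeps whichever of
// the planar or ETC1-style encodings yields the lower error.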
uint64 ProcessRGB_ETC2( const uint8* src )
{
    std::pair<uint64, uint64> result = Planar( src );

    uint64 d = 0;
    v4i a[8];
    uint err[4] = {};
    PrepareAverages( a, src, err );
    size_t idx = GetLeastError( err, 4 );
    EncodeAverages( d, a, idx );

    uint64 terr[2][8] = {};
    uint16 tsel[16][8];
    const uint32* id = g_id[idx];
    FindBestFit( terr, tsel, a, id, src );

    return EncodeSelectors( d, terr, tsel, id, result.first, result.second );
}