// NOTE(review): removed web-scrape artifacts that preceded the source text
// (a "basisu_gpu_texture.cpp 48 KB" size label and a concatenated
// line-number gutter); they were not part of the original file.
  1. // basisu_gpu_texture.cpp
  2. // Copyright (C) 2019-2021 Binomial LLC. All Rights Reserved.
  3. //
  4. // Licensed under the Apache License, Version 2.0 (the "License");
  5. // you may not use this file except in compliance with the License.
  6. // You may obtain a copy of the License at
  7. //
  8. // http://www.apache.org/licenses/LICENSE-2.0
  9. //
  10. // Unless required by applicable law or agreed to in writing, software
  11. // distributed under the License is distributed on an "AS IS" BASIS,
  12. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. // See the License for the specific language governing permissions and
  14. // limitations under the License.
  15. #include "basisu_gpu_texture.h"
  16. #include "basisu_enc.h"
  17. #include "basisu_pvrtc1_4.h"
  18. #include "basisu_astc_decomp.h"
  19. #include "basisu_bc7enc.h"
  20. namespace basisu
  21. {
  22. void unpack_etc2_eac(const void *pBlock_bits, color_rgba *pPixels)
  23. {
  24. static_assert(sizeof(eac_a8_block) == 8, "sizeof(eac_a8_block) == 8");
  25. const eac_a8_block *pBlock = static_cast<const eac_a8_block *>(pBlock_bits);
  26. const int8_t *pTable = g_etc2_eac_tables[pBlock->m_table];
  27. const uint64_t selector_bits = pBlock->get_selector_bits();
  28. const int32_t base = pBlock->m_base;
  29. const int32_t mul = pBlock->m_multiplier;
  30. pPixels[0].a = clamp255(base + pTable[pBlock->get_selector(0, 0, selector_bits)] * mul);
  31. pPixels[1].a = clamp255(base + pTable[pBlock->get_selector(1, 0, selector_bits)] * mul);
  32. pPixels[2].a = clamp255(base + pTable[pBlock->get_selector(2, 0, selector_bits)] * mul);
  33. pPixels[3].a = clamp255(base + pTable[pBlock->get_selector(3, 0, selector_bits)] * mul);
  34. pPixels[4].a = clamp255(base + pTable[pBlock->get_selector(0, 1, selector_bits)] * mul);
  35. pPixels[5].a = clamp255(base + pTable[pBlock->get_selector(1, 1, selector_bits)] * mul);
  36. pPixels[6].a = clamp255(base + pTable[pBlock->get_selector(2, 1, selector_bits)] * mul);
  37. pPixels[7].a = clamp255(base + pTable[pBlock->get_selector(3, 1, selector_bits)] * mul);
  38. pPixels[8].a = clamp255(base + pTable[pBlock->get_selector(0, 2, selector_bits)] * mul);
  39. pPixels[9].a = clamp255(base + pTable[pBlock->get_selector(1, 2, selector_bits)] * mul);
  40. pPixels[10].a = clamp255(base + pTable[pBlock->get_selector(2, 2, selector_bits)] * mul);
  41. pPixels[11].a = clamp255(base + pTable[pBlock->get_selector(3, 2, selector_bits)] * mul);
  42. pPixels[12].a = clamp255(base + pTable[pBlock->get_selector(0, 3, selector_bits)] * mul);
  43. pPixels[13].a = clamp255(base + pTable[pBlock->get_selector(1, 3, selector_bits)] * mul);
  44. pPixels[14].a = clamp255(base + pTable[pBlock->get_selector(2, 3, selector_bits)] * mul);
  45. pPixels[15].a = clamp255(base + pTable[pBlock->get_selector(3, 3, selector_bits)] * mul);
  46. }
  47. struct bc1_block
  48. {
  49. enum { cTotalEndpointBytes = 2, cTotalSelectorBytes = 4 };
  50. uint8_t m_low_color[cTotalEndpointBytes];
  51. uint8_t m_high_color[cTotalEndpointBytes];
  52. uint8_t m_selectors[cTotalSelectorBytes];
  53. inline uint32_t get_high_color() const { return m_high_color[0] | (m_high_color[1] << 8U); }
  54. inline uint32_t get_low_color() const { return m_low_color[0] | (m_low_color[1] << 8U); }
  55. static void unpack_color(uint32_t c, uint32_t &r, uint32_t &g, uint32_t &b)
  56. {
  57. r = (c >> 11) & 31;
  58. g = (c >> 5) & 63;
  59. b = c & 31;
  60. r = (r << 3) | (r >> 2);
  61. g = (g << 2) | (g >> 4);
  62. b = (b << 3) | (b >> 2);
  63. }
  64. inline uint32_t get_selector(uint32_t x, uint32_t y) const { assert((x < 4U) && (y < 4U)); return (m_selectors[y] >> (x * 2)) & 3; }
  65. };
  66. // Returns true if the block uses 3 color punchthrough alpha mode.
  67. bool unpack_bc1(const void *pBlock_bits, color_rgba *pPixels, bool set_alpha)
  68. {
  69. static_assert(sizeof(bc1_block) == 8, "sizeof(bc1_block) == 8");
  70. const bc1_block *pBlock = static_cast<const bc1_block *>(pBlock_bits);
  71. const uint32_t l = pBlock->get_low_color();
  72. const uint32_t h = pBlock->get_high_color();
  73. color_rgba c[4];
  74. uint32_t r0, g0, b0, r1, g1, b1;
  75. bc1_block::unpack_color(l, r0, g0, b0);
  76. bc1_block::unpack_color(h, r1, g1, b1);
  77. c[0].set_noclamp_rgba(r0, g0, b0, 255);
  78. c[1].set_noclamp_rgba(r1, g1, b1, 255);
  79. bool used_punchthrough = false;
  80. if (l > h)
  81. {
  82. c[2].set_noclamp_rgba((r0 * 2 + r1) / 3, (g0 * 2 + g1) / 3, (b0 * 2 + b1) / 3, 255);
  83. c[3].set_noclamp_rgba((r1 * 2 + r0) / 3, (g1 * 2 + g0) / 3, (b1 * 2 + b0) / 3, 255);
  84. }
  85. else
  86. {
  87. c[2].set_noclamp_rgba((r0 + r1) / 2, (g0 + g1) / 2, (b0 + b1) / 2, 255);
  88. c[3].set_noclamp_rgba(0, 0, 0, 0);
  89. used_punchthrough = true;
  90. }
  91. if (set_alpha)
  92. {
  93. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  94. {
  95. pPixels[0] = c[pBlock->get_selector(0, y)];
  96. pPixels[1] = c[pBlock->get_selector(1, y)];
  97. pPixels[2] = c[pBlock->get_selector(2, y)];
  98. pPixels[3] = c[pBlock->get_selector(3, y)];
  99. }
  100. }
  101. else
  102. {
  103. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  104. {
  105. pPixels[0].set_rgb(c[pBlock->get_selector(0, y)]);
  106. pPixels[1].set_rgb(c[pBlock->get_selector(1, y)]);
  107. pPixels[2].set_rgb(c[pBlock->get_selector(2, y)]);
  108. pPixels[3].set_rgb(c[pBlock->get_selector(3, y)]);
  109. }
  110. }
  111. return used_punchthrough;
  112. }
// Decodes a BC1 block the way NVIDIA hardware does. The palette entries are
// computed with NVIDIA's exact fixed-point expansion/interpolation formulas
// (e.g. the (3 * x * 22) / 8 red/blue expansion and the gdiff-based green
// interpolation), so results can differ by a few LSBs from the ideal decoder.
// Returns true if the block uses 3 color punchthrough alpha mode.
bool unpack_bc1_nv(const void *pBlock_bits, color_rgba *pPixels, bool set_alpha)
{
	static_assert(sizeof(bc1_block) == 8, "sizeof(bc1_block) == 8");
	const bc1_block *pBlock = static_cast<const bc1_block *>(pBlock_bits);

	const uint32_t l = pBlock->get_low_color();
	const uint32_t h = pBlock->get_high_color();

	color_rgba c[4];

	// Raw 5:6:5 endpoint components.
	int r0 = (l >> 11) & 31;
	int g0 = (l >> 5) & 63;
	int b0 = l & 31;
	int r1 = (h >> 11) & 31;
	int g1 = (h >> 5) & 63;
	int b1 = h & 31;

	// 5->8 bit expansion via (3 * v * 22) / 8 (NVIDIA's approximation of
	// (v << 3) | (v >> 2)); green uses exact 6->8 bit replication.
	c[0].b = (uint8_t)((3 * b0 * 22) / 8);
	c[0].g = (uint8_t)((g0 << 2) | (g0 >> 4));
	c[0].r = (uint8_t)((3 * r0 * 22) / 8);
	c[0].a = 0xFF;

	c[1].r = (uint8_t)((3 * r1 * 22) / 8);
	c[1].g = (uint8_t)((g1 << 2) | (g1 >> 4));
	c[1].b = (uint8_t)((3 * b1 * 22) / 8);
	c[1].a = 0xFF;

	// Signed difference of the expanded green endpoints, used by the green
	// interpolation formulas below.
	int gdiff = c[1].g - c[0].g;

	bool used_punchthrough = false;
	if (l > h)
	{
		// 4-color mode: 1/3 and 2/3 interpolants, NVIDIA rounding.
		c[2].r = (uint8_t)(((2 * r0 + r1) * 22) / 8);
		c[2].g = (uint8_t)(((256 * c[0].g + gdiff/4 + 128 + gdiff * 80) / 256));
		c[2].b = (uint8_t)(((2 * b0 + b1) * 22) / 8);
		c[2].a = 0xFF;

		c[3].r = (uint8_t)(((2 * r1 + r0) * 22) / 8);
		c[3].g = (uint8_t)((256 * c[1].g - gdiff/4 + 128 - gdiff * 80) / 256);
		c[3].b = (uint8_t)(((2 * b1 + b0) * 22) / 8);
		c[3].a = 0xFF;
	}
	else
	{
		// 3-color mode: midpoint entry plus transparent black (punchthrough).
		c[2].r = (uint8_t)(((r0 + r1) * 33) / 8);
		c[2].g = (uint8_t)((256 * c[0].g + gdiff/4 + 128 + gdiff * 128) / 256);
		c[2].b = (uint8_t)(((b0 + b1) * 33) / 8);
		c[2].a = 0xFF;

		c[3].set_noclamp_rgba(0, 0, 0, 0);
		used_punchthrough = true;
	}

	if (set_alpha)
	{
		for (uint32_t y = 0; y < 4; y++, pPixels += 4)
		{
			pPixels[0] = c[pBlock->get_selector(0, y)];
			pPixels[1] = c[pBlock->get_selector(1, y)];
			pPixels[2] = c[pBlock->get_selector(2, y)];
			pPixels[3] = c[pBlock->get_selector(3, y)];
		}
	}
	else
	{
		for (uint32_t y = 0; y < 4; y++, pPixels += 4)
		{
			pPixels[0].set_rgb(c[pBlock->get_selector(0, y)]);
			pPixels[1].set_rgb(c[pBlock->get_selector(1, y)]);
			pPixels[2].set_rgb(c[pBlock->get_selector(2, y)]);
			pPixels[3].set_rgb(c[pBlock->get_selector(3, y)]);
		}
	}

	return used_punchthrough;
}
  178. static inline int interp_5_6_amd(int c0, int c1) { assert(c0 < 256 && c1 < 256); return (c0 * 43 + c1 * 21 + 32) >> 6; }
  179. static inline int interp_half_5_6_amd(int c0, int c1) { assert(c0 < 256 && c1 < 256); return (c0 + c1 + 1) >> 1; }
  180. bool unpack_bc1_amd(const void *pBlock_bits, color_rgba *pPixels, bool set_alpha)
  181. {
  182. const bc1_block *pBlock = static_cast<const bc1_block *>(pBlock_bits);
  183. const uint32_t l = pBlock->get_low_color();
  184. const uint32_t h = pBlock->get_high_color();
  185. color_rgba c[4];
  186. uint32_t r0, g0, b0, r1, g1, b1;
  187. bc1_block::unpack_color(l, r0, g0, b0);
  188. bc1_block::unpack_color(h, r1, g1, b1);
  189. c[0].set_noclamp_rgba(r0, g0, b0, 255);
  190. c[1].set_noclamp_rgba(r1, g1, b1, 255);
  191. bool used_punchthrough = false;
  192. if (l > h)
  193. {
  194. c[2].set_noclamp_rgba(interp_5_6_amd(r0, r1), interp_5_6_amd(g0, g1), interp_5_6_amd(b0, b1), 255);
  195. c[3].set_noclamp_rgba(interp_5_6_amd(r1, r0), interp_5_6_amd(g1, g0), interp_5_6_amd(b1, b0), 255);
  196. }
  197. else
  198. {
  199. c[2].set_noclamp_rgba(interp_half_5_6_amd(r0, r1), interp_half_5_6_amd(g0, g1), interp_half_5_6_amd(b0, b1), 255);
  200. c[3].set_noclamp_rgba(0, 0, 0, 0);
  201. used_punchthrough = true;
  202. }
  203. if (set_alpha)
  204. {
  205. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  206. {
  207. pPixels[0] = c[pBlock->get_selector(0, y)];
  208. pPixels[1] = c[pBlock->get_selector(1, y)];
  209. pPixels[2] = c[pBlock->get_selector(2, y)];
  210. pPixels[3] = c[pBlock->get_selector(3, y)];
  211. }
  212. }
  213. else
  214. {
  215. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  216. {
  217. pPixels[0].set_rgb(c[pBlock->get_selector(0, y)]);
  218. pPixels[1].set_rgb(c[pBlock->get_selector(1, y)]);
  219. pPixels[2].set_rgb(c[pBlock->get_selector(2, y)]);
  220. pPixels[3].set_rgb(c[pBlock->get_selector(3, y)]);
  221. }
  222. }
  223. return used_punchthrough;
  224. }
  225. struct bc4_block
  226. {
  227. enum { cBC4SelectorBits = 3, cTotalSelectorBytes = 6, cMaxSelectorValues = 8 };
  228. uint8_t m_endpoints[2];
  229. uint8_t m_selectors[cTotalSelectorBytes];
  230. inline uint32_t get_low_alpha() const { return m_endpoints[0]; }
  231. inline uint32_t get_high_alpha() const { return m_endpoints[1]; }
  232. inline bool is_alpha6_block() const { return get_low_alpha() <= get_high_alpha(); }
  233. inline uint64_t get_selector_bits() const
  234. {
  235. return ((uint64_t)((uint32_t)m_selectors[0] | ((uint32_t)m_selectors[1] << 8U) | ((uint32_t)m_selectors[2] << 16U) | ((uint32_t)m_selectors[3] << 24U))) |
  236. (((uint64_t)m_selectors[4]) << 32U) |
  237. (((uint64_t)m_selectors[5]) << 40U);
  238. }
  239. inline uint32_t get_selector(uint32_t x, uint32_t y, uint64_t selector_bits) const
  240. {
  241. assert((x < 4U) && (y < 4U));
  242. return (selector_bits >> (((y * 4) + x) * cBC4SelectorBits)) & (cMaxSelectorValues - 1);
  243. }
  244. static inline uint32_t get_block_values6(uint8_t *pDst, uint32_t l, uint32_t h)
  245. {
  246. pDst[0] = static_cast<uint8_t>(l);
  247. pDst[1] = static_cast<uint8_t>(h);
  248. pDst[2] = static_cast<uint8_t>((l * 4 + h) / 5);
  249. pDst[3] = static_cast<uint8_t>((l * 3 + h * 2) / 5);
  250. pDst[4] = static_cast<uint8_t>((l * 2 + h * 3) / 5);
  251. pDst[5] = static_cast<uint8_t>((l + h * 4) / 5);
  252. pDst[6] = 0;
  253. pDst[7] = 255;
  254. return 6;
  255. }
  256. static inline uint32_t get_block_values8(uint8_t *pDst, uint32_t l, uint32_t h)
  257. {
  258. pDst[0] = static_cast<uint8_t>(l);
  259. pDst[1] = static_cast<uint8_t>(h);
  260. pDst[2] = static_cast<uint8_t>((l * 6 + h) / 7);
  261. pDst[3] = static_cast<uint8_t>((l * 5 + h * 2) / 7);
  262. pDst[4] = static_cast<uint8_t>((l * 4 + h * 3) / 7);
  263. pDst[5] = static_cast<uint8_t>((l * 3 + h * 4) / 7);
  264. pDst[6] = static_cast<uint8_t>((l * 2 + h * 5) / 7);
  265. pDst[7] = static_cast<uint8_t>((l + h * 6) / 7);
  266. return 8;
  267. }
  268. static inline uint32_t get_block_values(uint8_t *pDst, uint32_t l, uint32_t h)
  269. {
  270. if (l > h)
  271. return get_block_values8(pDst, l, h);
  272. else
  273. return get_block_values6(pDst, l, h);
  274. }
  275. };
  276. void unpack_bc4(const void *pBlock_bits, uint8_t *pPixels, uint32_t stride)
  277. {
  278. static_assert(sizeof(bc4_block) == 8, "sizeof(bc4_block) == 8");
  279. const bc4_block *pBlock = static_cast<const bc4_block *>(pBlock_bits);
  280. uint8_t sel_values[8];
  281. bc4_block::get_block_values(sel_values, pBlock->get_low_alpha(), pBlock->get_high_alpha());
  282. const uint64_t selector_bits = pBlock->get_selector_bits();
  283. for (uint32_t y = 0; y < 4; y++, pPixels += (stride * 4U))
  284. {
  285. pPixels[0] = sel_values[pBlock->get_selector(0, y, selector_bits)];
  286. pPixels[stride * 1] = sel_values[pBlock->get_selector(1, y, selector_bits)];
  287. pPixels[stride * 2] = sel_values[pBlock->get_selector(2, y, selector_bits)];
  288. pPixels[stride * 3] = sel_values[pBlock->get_selector(3, y, selector_bits)];
  289. }
  290. }
  291. // Returns false if the block uses 3-color punchthrough alpha mode, which isn't supported on some GPU's for BC3.
  292. bool unpack_bc3(const void *pBlock_bits, color_rgba *pPixels)
  293. {
  294. bool success = true;
  295. if (unpack_bc1((const uint8_t *)pBlock_bits + sizeof(bc4_block), pPixels, true))
  296. success = false;
  297. unpack_bc4(pBlock_bits, &pPixels[0].a, sizeof(color_rgba));
  298. return success;
  299. }
  300. // writes RG
  301. void unpack_bc5(const void *pBlock_bits, color_rgba *pPixels)
  302. {
  303. unpack_bc4(pBlock_bits, &pPixels[0].r, sizeof(color_rgba));
  304. unpack_bc4((const uint8_t *)pBlock_bits + sizeof(bc4_block), &pPixels[0].g, sizeof(color_rgba));
  305. }
  306. // ATC isn't officially documented, so I'm assuming these references:
  307. // http://www.guildsoftware.com/papers/2012.Converting.DXTC.to.ATC.pdf
  308. // https://github.com/Triang3l/S3TConv/blob/master/s3tconv_atitc.c
  309. // The paper incorrectly says the ATC lerp factors are 1/3 and 2/3, but they are actually 3/8 and 5/8.
  310. void unpack_atc(const void* pBlock_bits, color_rgba* pPixels)
  311. {
  312. const uint8_t* pBytes = static_cast<const uint8_t*>(pBlock_bits);
  313. const uint16_t color0 = pBytes[0] | (pBytes[1] << 8U);
  314. const uint16_t color1 = pBytes[2] | (pBytes[3] << 8U);
  315. uint32_t sels = pBytes[4] | (pBytes[5] << 8U) | (pBytes[6] << 16U) | (pBytes[7] << 24U);
  316. const bool mode = (color0 & 0x8000) != 0;
  317. color_rgba c[4];
  318. c[0].set((color0 >> 10) & 31, (color0 >> 5) & 31, color0 & 31, 255);
  319. c[0].r = (c[0].r << 3) | (c[0].r >> 2);
  320. c[0].g = (c[0].g << 3) | (c[0].g >> 2);
  321. c[0].b = (c[0].b << 3) | (c[0].b >> 2);
  322. c[3].set((color1 >> 11) & 31, (color1 >> 5) & 63, color1 & 31, 255);
  323. c[3].r = (c[3].r << 3) | (c[3].r >> 2);
  324. c[3].g = (c[3].g << 2) | (c[3].g >> 4);
  325. c[3].b = (c[3].b << 3) | (c[3].b >> 2);
  326. if (mode)
  327. {
  328. c[1].set(basisu::maximum(0, c[0].r - (c[3].r >> 2)), basisu::maximum(0, c[0].g - (c[3].g >> 2)), basisu::maximum(0, c[0].b - (c[3].b >> 2)), 255);
  329. c[2] = c[0];
  330. c[0].set(0, 0, 0, 255);
  331. }
  332. else
  333. {
  334. c[1].r = (c[0].r * 5 + c[3].r * 3) >> 3;
  335. c[1].g = (c[0].g * 5 + c[3].g * 3) >> 3;
  336. c[1].b = (c[0].b * 5 + c[3].b * 3) >> 3;
  337. c[2].r = (c[0].r * 3 + c[3].r * 5) >> 3;
  338. c[2].g = (c[0].g * 3 + c[3].g * 5) >> 3;
  339. c[2].b = (c[0].b * 3 + c[3].b * 5) >> 3;
  340. }
  341. for (uint32_t i = 0; i < 16; i++)
  342. {
  343. const uint32_t s = sels & 3;
  344. pPixels[i] = c[s];
  345. sels >>= 2;
  346. }
  347. }
  348. // BC7 mode 0-7 decompression.
  349. // Instead of one monster routine to unpack all the BC7 modes, we're lumping the 3 subset, 2 subset, 1 subset, and dual plane modes together into simple shared routines.
  350. static inline uint32_t bc7_dequant(uint32_t val, uint32_t pbit, uint32_t val_bits) { assert(val < (1U << val_bits)); assert(pbit < 2); assert(val_bits >= 4 && val_bits <= 8); const uint32_t total_bits = val_bits + 1; val = (val << 1) | pbit; val <<= (8 - total_bits); val |= (val >> total_bits); assert(val <= 255); return val; }
  351. static inline uint32_t bc7_dequant(uint32_t val, uint32_t val_bits) { assert(val < (1U << val_bits)); assert(val_bits >= 4 && val_bits <= 8); val <<= (8 - val_bits); val |= (val >> val_bits); assert(val <= 255); return val; }
  352. static inline uint32_t bc7_interp2(uint32_t l, uint32_t h, uint32_t w) { assert(w < 4); return (l * (64 - basist::g_bc7_weights2[w]) + h * basist::g_bc7_weights2[w] + 32) >> 6; }
  353. static inline uint32_t bc7_interp3(uint32_t l, uint32_t h, uint32_t w) { assert(w < 8); return (l * (64 - basist::g_bc7_weights3[w]) + h * basist::g_bc7_weights3[w] + 32) >> 6; }
  354. static inline uint32_t bc7_interp4(uint32_t l, uint32_t h, uint32_t w) { assert(w < 16); return (l * (64 - basist::g_bc7_weights4[w]) + h * basist::g_bc7_weights4[w] + 32) >> 6; }
  355. static inline uint32_t bc7_interp(uint32_t l, uint32_t h, uint32_t w, uint32_t bits)
  356. {
  357. assert(l <= 255 && h <= 255);
  358. switch (bits)
  359. {
  360. case 2: return bc7_interp2(l, h, w);
  361. case 3: return bc7_interp3(l, h, w);
  362. case 4: return bc7_interp4(l, h, w);
  363. default:
  364. break;
  365. }
  366. return 0;
  367. }
// Unpacks BC7 modes 0 and 2 (the 3-subset modes).
// Mode 0: 4-bit partition, 4-bit endpoints, one p-bit per endpoint, 3-bit weights.
// Mode 2: 6-bit partition, 5-bit endpoints, no p-bits, 2-bit weights.
// Returns false if the block's leading mode bits don't match `mode`.
bool unpack_bc7_mode0_2(uint32_t mode, const void* pBlock_bits, color_rgba* pPixels)
{
	//const uint32_t SUBSETS = 3;
	const uint32_t ENDPOINTS = 6;
	const uint32_t COMPS = 3;
	const uint32_t WEIGHT_BITS = (mode == 0) ? 3 : 2;
	const uint32_t ENDPOINT_BITS = (mode == 0) ? 4 : 5;
	const uint32_t PBITS = (mode == 0) ? 6 : 0;
	const uint32_t WEIGHT_VALS = 1 << WEIGHT_BITS;

	// read_bits32 consumes fields sequentially, advancing bit_offset
	// (presumably taken by reference — consistent with the assert below).
	uint32_t bit_offset = 0;
	const uint8_t* pBuf = static_cast<const uint8_t*>(pBlock_bits);

	// The mode is encoded in unary: mode N is N zero bits then a 1 bit.
	if (read_bits32(pBuf, bit_offset, mode + 1) != (1U << mode)) return false;

	const uint32_t part = read_bits32(pBuf, bit_offset, (mode == 0) ? 4 : 6);

	// Endpoints are stored component-major: all R's, then G's, then B's.
	color_rgba endpoints[ENDPOINTS];
	for (uint32_t c = 0; c < COMPS; c++)
		for (uint32_t e = 0; e < ENDPOINTS; e++)
			endpoints[e][c] = (uint8_t)read_bits32(pBuf, bit_offset, ENDPOINT_BITS);

	uint32_t pbits[6];
	for (uint32_t p = 0; p < PBITS; p++)
		pbits[p] = read_bits32(pBuf, bit_offset, 1);

	// Anchor indices (first pixel of each subset) drop their MSB, so they
	// read one fewer bit than the other weights.
	uint32_t weights[16];
	for (uint32_t i = 0; i < 16; i++)
		weights[i] = read_bits32(pBuf, bit_offset, ((!i) || (i == basist::g_bc7_table_anchor_index_third_subset_1[part]) || (i == basist::g_bc7_table_anchor_index_third_subset_2[part])) ? (WEIGHT_BITS - 1) : WEIGHT_BITS);

	// The entire 128-bit block must have been consumed exactly.
	assert(bit_offset == 128);

	// Dequantize endpoints to 8 bits; alpha is always opaque in these modes.
	for (uint32_t e = 0; e < ENDPOINTS; e++)
		for (uint32_t c = 0; c < 4; c++)
			endpoints[e][c] = (uint8_t)((c == 3) ? 255 : (PBITS ? bc7_dequant(endpoints[e][c], pbits[e], ENDPOINT_BITS) : bc7_dequant(endpoints[e][c], ENDPOINT_BITS)));

	// Precompute each subset's full interpolated palette.
	color_rgba block_colors[3][8];
	for (uint32_t s = 0; s < 3; s++)
		for (uint32_t i = 0; i < WEIGHT_VALS; i++)
		{
			for (uint32_t c = 0; c < 3; c++)
				block_colors[s][i][c] = (uint8_t)bc7_interp(endpoints[s * 2 + 0][c], endpoints[s * 2 + 1][c], i, WEIGHT_BITS);
			block_colors[s][i][3] = 255;
		}

	// The partition table maps each pixel to its subset.
	for (uint32_t i = 0; i < 16; i++)
		pPixels[i] = block_colors[basist::g_bc7_partition3[part * 16 + i]][weights[i]];

	return true;
}
// Unpacks BC7 modes 1, 3 and 7 (the 2-subset modes).
// Mode 1: 6-bit endpoints, 3-bit weights, 2 shared p-bits.
// Mode 3: 7-bit endpoints, 2-bit weights, 4 p-bits.
// Mode 7: 5-bit RGBA endpoints (alpha coded too), 2-bit weights, 4 p-bits.
// Returns false if the block's leading mode bits don't match `mode`.
bool unpack_bc7_mode1_3_7(uint32_t mode, const void* pBlock_bits, color_rgba* pPixels)
{
	//const uint32_t SUBSETS = 2;
	const uint32_t ENDPOINTS = 4;
	const uint32_t COMPS = (mode == 7) ? 4 : 3; // mode 7 also codes alpha endpoints
	const uint32_t WEIGHT_BITS = (mode == 1) ? 3 : 2;
	const uint32_t ENDPOINT_BITS = (mode == 7) ? 5 : ((mode == 1) ? 6 : 7);
	const uint32_t PBITS = (mode == 1) ? 2 : 4;
	const uint32_t SHARED_PBITS = (mode == 1) ? true : false; // mode 1 shares one p-bit per subset
	const uint32_t WEIGHT_VALS = 1 << WEIGHT_BITS;

	uint32_t bit_offset = 0;
	const uint8_t* pBuf = static_cast<const uint8_t*>(pBlock_bits);

	// The mode is encoded in unary: mode N is N zero bits then a 1 bit.
	if (read_bits32(pBuf, bit_offset, mode + 1) != (1U << mode)) return false;

	const uint32_t part = read_bits32(pBuf, bit_offset, 6);

	// Endpoints are stored component-major.
	color_rgba endpoints[ENDPOINTS];
	for (uint32_t c = 0; c < COMPS; c++)
		for (uint32_t e = 0; e < ENDPOINTS; e++)
			endpoints[e][c] = (uint8_t)read_bits32(pBuf, bit_offset, ENDPOINT_BITS);

	uint32_t pbits[4];
	for (uint32_t p = 0; p < PBITS; p++)
		pbits[p] = read_bits32(pBuf, bit_offset, 1);

	// Anchor indices (pixel 0 and the per-partition second-subset anchor)
	// drop their MSB, so they read one fewer bit.
	uint32_t weights[16];
	for (uint32_t i = 0; i < 16; i++)
		weights[i] = read_bits32(pBuf, bit_offset, ((!i) || (i == basist::g_bc7_table_anchor_index_second_subset[part])) ? (WEIGHT_BITS - 1) : WEIGHT_BITS);

	// The entire 128-bit block must have been consumed exactly.
	assert(bit_offset == 128);

	// Dequantize. For modes 1/3 the (c == 3) test forces alpha to 255; for
	// mode 7 the test is (c == 4), never true, so alpha is dequantized too.
	for (uint32_t e = 0; e < ENDPOINTS; e++)
		for (uint32_t c = 0; c < 4; c++)
			endpoints[e][c] = (uint8_t)((c == ((mode == 7U) ? 4U : 3U)) ? 255 : bc7_dequant(endpoints[e][c], pbits[SHARED_PBITS ? (e >> 1) : e], ENDPOINT_BITS));

	// Precompute each subset's interpolated palette.
	color_rgba block_colors[2][8];
	for (uint32_t s = 0; s < 2; s++)
		for (uint32_t i = 0; i < WEIGHT_VALS; i++)
		{
			for (uint32_t c = 0; c < COMPS; c++)
				block_colors[s][i][c] = (uint8_t)bc7_interp(endpoints[s * 2 + 0][c], endpoints[s * 2 + 1][c], i, WEIGHT_BITS);
			// RGB-only modes force opaque alpha; for mode 7 this is a no-op
			// self-assignment (alpha was already interpolated above).
			block_colors[s][i][3] = (COMPS == 3) ? 255 : block_colors[s][i][3];
		}

	// The partition table maps each pixel to its subset.
	for (uint32_t i = 0; i < 16; i++)
		pPixels[i] = block_colors[basist::g_bc7_partition2[part * 16 + i]][weights[i]];

	return true;
}
// Unpacks BC7 modes 4 and 5 (single subset, dual-plane: separate index sets
// for color and alpha, plus a component rotation).
// Mode 4: 5-bit color / 6-bit alpha endpoints, 2-bit color + 3-bit alpha
//         weights, and an index-swap bit.
// Mode 5: 7-bit color / 8-bit alpha endpoints, 2-bit weights for both planes.
// Returns false if the block's leading mode bits don't match `mode`.
bool unpack_bc7_mode4_5(uint32_t mode, const void* pBlock_bits, color_rgba* pPixels)
{
	const uint32_t ENDPOINTS = 2;
	const uint32_t COMPS = 4;
	const uint32_t WEIGHT_BITS = 2;
	const uint32_t A_WEIGHT_BITS = (mode == 4) ? 3 : 2;
	const uint32_t ENDPOINT_BITS = (mode == 4) ? 5 : 7;
	const uint32_t A_ENDPOINT_BITS = (mode == 4) ? 6 : 8;
	//const uint32_t WEIGHT_VALS = 1 << WEIGHT_BITS;
	//const uint32_t A_WEIGHT_VALS = 1 << A_WEIGHT_BITS;

	uint32_t bit_offset = 0;
	const uint8_t* pBuf = static_cast<const uint8_t*>(pBlock_bits);

	// The mode is encoded in unary: mode N is N zero bits then a 1 bit.
	if (read_bits32(pBuf, bit_offset, mode + 1) != (1U << mode)) return false;

	// comp_rot selects which color component (if any) is swapped with alpha
	// after decode; index_mode (mode 4 only) swaps the two index planes.
	const uint32_t comp_rot = read_bits32(pBuf, bit_offset, 2);
	const uint32_t index_mode = (mode == 4) ? read_bits32(pBuf, bit_offset, 1) : 0;

	// Endpoints are stored component-major; alpha uses its own bit width.
	color_rgba endpoints[ENDPOINTS];
	for (uint32_t c = 0; c < COMPS; c++)
		for (uint32_t e = 0; e < ENDPOINTS; e++)
			endpoints[e][c] = (uint8_t)read_bits32(pBuf, bit_offset, (c == 3) ? A_ENDPOINT_BITS : ENDPOINT_BITS);

	// weight_bits[0] is the width of the color plane's indices,
	// weight_bits[1] the alpha plane's — swapped when index_mode is set.
	const uint32_t weight_bits[2] = { index_mode ? A_WEIGHT_BITS : WEIGHT_BITS, index_mode ? WEIGHT_BITS : A_WEIGHT_BITS };

	// The first index of each plane drops its MSB (anchor at pixel 0).
	uint32_t weights[16], a_weights[16];
	for (uint32_t i = 0; i < 16; i++)
		(index_mode ? a_weights : weights)[i] = read_bits32(pBuf, bit_offset, weight_bits[index_mode] - ((!i) ? 1 : 0));
	for (uint32_t i = 0; i < 16; i++)
		(index_mode ? weights : a_weights)[i] = read_bits32(pBuf, bit_offset, weight_bits[1 - index_mode] - ((!i) ? 1 : 0));

	// The entire 128-bit block must have been consumed exactly.
	assert(bit_offset == 128);

	// Dequantize endpoints to 8 bits per component.
	for (uint32_t e = 0; e < ENDPOINTS; e++)
		for (uint32_t c = 0; c < 4; c++)
			endpoints[e][c] = (uint8_t)bc7_dequant(endpoints[e][c], (c == 3) ? A_ENDPOINT_BITS : ENDPOINT_BITS);

	// Interpolate separate RGB and alpha palettes (they can differ in size).
	color_rgba block_colors[8];
	for (uint32_t i = 0; i < (1U << weight_bits[0]); i++)
		for (uint32_t c = 0; c < 3; c++)
			block_colors[i][c] = (uint8_t)bc7_interp(endpoints[0][c], endpoints[1][c], i, weight_bits[0]);

	for (uint32_t i = 0; i < (1U << weight_bits[1]); i++)
		block_colors[i][3] = (uint8_t)bc7_interp(endpoints[0][3], endpoints[1][3], i, weight_bits[1]);

	for (uint32_t i = 0; i < 16; i++)
	{
		pPixels[i] = block_colors[weights[i]];
		pPixels[i].a = block_colors[a_weights[i]].a;
		// Undo the encoder's channel rotation: 1/2/3 swap alpha with R/G/B.
		if (comp_rot >= 1)
			std::swap(pPixels[i].a, pPixels[i].m_comps[comp_rot - 1]);
	}

	return true;
}
// Bit-exact layout of a BC7 mode 6 block (single subset, 7-bit RGBA
// endpoints, one p-bit per endpoint, 4-bit selectors).
// NOTE(review): relies on the compiler packing bitfields LSB-first within
// each uint64_t, as little-endian targets conventionally do — confirm when
// porting to a new compiler/ABI.
struct bc7_mode_6
{
	// Low 64 bits: mode marker, interleaved endpoint components, first p-bit.
	struct
	{
		uint64_t m_mode : 7; // unary mode code: must equal 1 << 6
		uint64_t m_r0 : 7;
		uint64_t m_r1 : 7;
		uint64_t m_g0 : 7;
		uint64_t m_g1 : 7;
		uint64_t m_b0 : 7;
		uint64_t m_b1 : 7;
		uint64_t m_a0 : 7;
		uint64_t m_a1 : 7;
		uint64_t m_p0 : 1; // p-bit for endpoint 0
	} m_lo;

	// High 64 bits: second p-bit, then the 4x4 selector grid. The first
	// selector (m_s00) is the anchor index and stores only 3 bits.
	union
	{
		struct
		{
			uint64_t m_p1 : 1; // p-bit for endpoint 1
			uint64_t m_s00 : 3;
			uint64_t m_s10 : 4;
			uint64_t m_s20 : 4;
			uint64_t m_s30 : 4;
			uint64_t m_s01 : 4;
			uint64_t m_s11 : 4;
			uint64_t m_s21 : 4;
			uint64_t m_s31 : 4;
			uint64_t m_s02 : 4;
			uint64_t m_s12 : 4;
			uint64_t m_s22 : 4;
			uint64_t m_s32 : 4;
			uint64_t m_s03 : 4;
			uint64_t m_s13 : 4;
			uint64_t m_s23 : 4;
			uint64_t m_s33 : 4;
		} m_hi;
		uint64_t m_hi_bits; // raw access to the same 64 bits
	};
};
// Unpacks a BC7 mode 6 block (single subset, 7-bit RGBA endpoints plus a
// per-endpoint P-bit, 4-bit selectors) into 16 RGBA pixels in raster order.
// Returns false if the block's mode field doesn't actually encode mode 6.
bool unpack_bc7_mode6(const void *pBlock_bits, color_rgba *pPixels)
{
	static_assert(sizeof(bc7_mode_6) == 16, "sizeof(bc7_mode_6) == 16");

	const bc7_mode_6 &block = *static_cast<const bc7_mode_6 *>(pBlock_bits);

	// BC7 encodes the mode unary-style in the low bits; a mode 6 block's
	// mode field is exactly 1 << 6.
	if (block.m_lo.m_mode != (1 << 6))
		return false;

	// Expand each 7-bit endpoint component to 8 bits, using that endpoint's
	// P-bit as the new least-significant bit.
	const uint32_t r0 = (uint32_t)((block.m_lo.m_r0 << 1) | block.m_lo.m_p0);
	const uint32_t g0 = (uint32_t)((block.m_lo.m_g0 << 1) | block.m_lo.m_p0);
	const uint32_t b0 = (uint32_t)((block.m_lo.m_b0 << 1) | block.m_lo.m_p0);
	const uint32_t a0 = (uint32_t)((block.m_lo.m_a0 << 1) | block.m_lo.m_p0);
	const uint32_t r1 = (uint32_t)((block.m_lo.m_r1 << 1) | block.m_hi.m_p1);
	const uint32_t g1 = (uint32_t)((block.m_lo.m_g1 << 1) | block.m_hi.m_p1);
	const uint32_t b1 = (uint32_t)((block.m_lo.m_b1 << 1) | block.m_hi.m_p1);
	const uint32_t a1 = (uint32_t)((block.m_lo.m_a1 << 1) | block.m_hi.m_p1);

	// Precompute all 16 possible interpolated colors with the standard BC7
	// 4-bit weights: c = (e0*(64-w) + e1*w + 32) >> 6.
	color_rgba vals[16];
	for (uint32_t i = 0; i < 16; i++)
	{
		const uint32_t w = basist::g_bc7_weights4[i];
		const uint32_t iw = 64 - w;
		vals[i].set_noclamp_rgba(
			(r0 * iw + r1 * w + 32) >> 6,
			(g0 * iw + g1 * w + 32) >> 6,
			(b0 * iw + b1 * w + 32) >> 6,
			(a0 * iw + a1 * w + 32) >> 6);
	}

	// Each texel's 4-bit selector (m_sXY = column X of row Y) picks one of
	// the precomputed colors; output is written in raster order.
	pPixels[0] = vals[block.m_hi.m_s00];
	pPixels[1] = vals[block.m_hi.m_s10];
	pPixels[2] = vals[block.m_hi.m_s20];
	pPixels[3] = vals[block.m_hi.m_s30];

	pPixels[4] = vals[block.m_hi.m_s01];
	pPixels[5] = vals[block.m_hi.m_s11];
	pPixels[6] = vals[block.m_hi.m_s21];
	pPixels[7] = vals[block.m_hi.m_s31];

	pPixels[8] = vals[block.m_hi.m_s02];
	pPixels[9] = vals[block.m_hi.m_s12];
	pPixels[10] = vals[block.m_hi.m_s22];
	pPixels[11] = vals[block.m_hi.m_s32];

	pPixels[12] = vals[block.m_hi.m_s03];
	pPixels[13] = vals[block.m_hi.m_s13];
	pPixels[14] = vals[block.m_hi.m_s23];
	pPixels[15] = vals[block.m_hi.m_s33];

	return true;
}
  574. bool unpack_bc7(const void *pBlock, color_rgba *pPixels)
  575. {
  576. const uint32_t first_byte = static_cast<const uint8_t*>(pBlock)[0];
  577. for (uint32_t mode = 0; mode <= 7; mode++)
  578. {
  579. if (first_byte & (1U << mode))
  580. {
  581. switch (mode)
  582. {
  583. case 0:
  584. case 2:
  585. return unpack_bc7_mode0_2(mode, pBlock, pPixels);
  586. case 1:
  587. case 3:
  588. case 7:
  589. return unpack_bc7_mode1_3_7(mode, pBlock, pPixels);
  590. case 4:
  591. case 5:
  592. return unpack_bc7_mode4_5(mode, pBlock, pPixels);
  593. case 6:
  594. return unpack_bc7_mode6(pBlock, pPixels);
  595. default:
  596. break;
  597. }
  598. }
  599. }
  600. return false;
  601. }
// A 128-bit FXT1 block covering 8x4 pixels. The low 64 bits hold 32 2-bit
// texel selectors; the high 64 bits hold four 5:5:5 colors plus control bits.
struct fxt1_block
{
	union
	{
		struct
		{
			// 2-bit selectors: t00..t15 are the left 4x4 half, t16..t31 the
			// right 4x4 half, each in raster order within its half.
			uint64_t m_t00 : 2;
			uint64_t m_t01 : 2;
			uint64_t m_t02 : 2;
			uint64_t m_t03 : 2;
			uint64_t m_t04 : 2;
			uint64_t m_t05 : 2;
			uint64_t m_t06 : 2;
			uint64_t m_t07 : 2;
			uint64_t m_t08 : 2;
			uint64_t m_t09 : 2;
			uint64_t m_t10 : 2;
			uint64_t m_t11 : 2;
			uint64_t m_t12 : 2;
			uint64_t m_t13 : 2;
			uint64_t m_t14 : 2;
			uint64_t m_t15 : 2;
			uint64_t m_t16 : 2;
			uint64_t m_t17 : 2;
			uint64_t m_t18 : 2;
			uint64_t m_t19 : 2;
			uint64_t m_t20 : 2;
			uint64_t m_t21 : 2;
			uint64_t m_t22 : 2;
			uint64_t m_t23 : 2;
			uint64_t m_t24 : 2;
			uint64_t m_t25 : 2;
			uint64_t m_t26 : 2;
			uint64_t m_t27 : 2;
			uint64_t m_t28 : 2;
			uint64_t m_t29 : 2;
			uint64_t m_t30 : 2;
			uint64_t m_t31 : 2;
		} m_lo;
		uint64_t m_lo_bits;
		uint8_t m_sels[8]; // Same selector bits viewed as bytes (4 selectors per byte).
	};
	union
	{
		struct
		{
#ifdef BASISU_USE_ORIGINAL_3DFX_FXT1_ENCODING
			// This is the format that 3DFX's DECOMP.EXE tool expects, which I'm assuming is what the actual 3DFX hardware wanted.
			// Unfortunately, color0/color1 and color2/color3 are flipped relative to the official OpenGL extension and Intel's documentation!
			uint64_t m_b1 : 5;
			uint64_t m_g1 : 5;
			uint64_t m_r1 : 5;
			uint64_t m_b0 : 5;
			uint64_t m_g0 : 5;
			uint64_t m_r0 : 5;
			uint64_t m_b3 : 5;
			uint64_t m_g3 : 5;
			uint64_t m_r3 : 5;
			uint64_t m_b2 : 5;
			uint64_t m_g2 : 5;
			uint64_t m_r2 : 5;
#else
			// Intel's encoding, and the encoding in the OpenGL FXT1 spec.
			uint64_t m_b0 : 5;
			uint64_t m_g0 : 5;
			uint64_t m_r0 : 5;
			uint64_t m_b1 : 5;
			uint64_t m_g1 : 5;
			uint64_t m_r1 : 5;
			uint64_t m_b2 : 5;
			uint64_t m_g2 : 5;
			uint64_t m_r2 : 5;
			uint64_t m_b3 : 5;
			uint64_t m_g3 : 5;
			uint64_t m_r3 : 5;
#endif
			uint64_t m_alpha : 1; // Alpha-block flag; unpack_fxt1 rejects blocks with this set.
			uint64_t m_glsb : 2;  // Green LSBs for colors 1 and 3 (see unpack_fxt1).
			uint64_t m_mode : 1;  // Mode bit; unpack_fxt1 requires it set (CC_MIXED per the comment there).
		} m_hi;
		uint64_t m_hi_bits;
	};
};
  685. static color_rgba expand_565(const color_rgba& c)
  686. {
  687. return color_rgba((c.r << 3) | (c.r >> 2), (c.g << 2) | (c.g >> 4), (c.b << 3) | (c.b >> 2), 255);
  688. }
// We only support CC_MIXED non-alpha blocks here because that's the only mode the transcoder uses at the moment.
// Decodes an 8x4 FXT1 block into pPixels (destination stride is 8 pixels):
// the left 4x4 half uses colors 0/1 plus their 1/3-2/3 interpolants, the
// right 4x4 half uses colors 2/3 likewise; each texel picks one of the four
// via its 2-bit selector. Returns false for unsupported block modes.
bool unpack_fxt1(const void *p, color_rgba *pPixels)
{
	const fxt1_block* pBlock = static_cast<const fxt1_block*>(p);

	if (pBlock->m_hi.m_mode == 0)
		return false; // Not the mode we support
	if (pBlock->m_hi.m_alpha == 1)
		return false; // Alpha blocks are unsupported here

	// Reconstruct the four 5:6:5 endpoints. Colors 1/3 take their green LSB
	// directly from m_glsb; colors 0/2 take it from the MSB of the first
	// selector of their half XOR'd with the matching m_glsb bit.
	color_rgba colors[4];
	colors[0].r = pBlock->m_hi.m_r0;
	colors[0].g = (uint8_t)((pBlock->m_hi.m_g0 << 1) | ((pBlock->m_lo.m_t00 >> 1) ^ (pBlock->m_hi.m_glsb & 1)));
	colors[0].b = pBlock->m_hi.m_b0;
	colors[0].a = 255;

	colors[1].r = pBlock->m_hi.m_r1;
	colors[1].g = (uint8_t)((pBlock->m_hi.m_g1 << 1) | (pBlock->m_hi.m_glsb & 1));
	colors[1].b = pBlock->m_hi.m_b1;
	colors[1].a = 255;

	colors[2].r = pBlock->m_hi.m_r2;
	colors[2].g = (uint8_t)((pBlock->m_hi.m_g2 << 1) | ((pBlock->m_lo.m_t16 >> 1) ^ (pBlock->m_hi.m_glsb >> 1)));
	colors[2].b = pBlock->m_hi.m_b2;
	colors[2].a = 255;

	colors[3].r = pBlock->m_hi.m_r3;
	colors[3].g = (uint8_t)((pBlock->m_hi.m_g3 << 1) | (pBlock->m_hi.m_glsb >> 1));
	colors[3].b = pBlock->m_hi.m_b3;
	colors[3].a = 255;

	// Expand 5:6:5 to 8:8:8.
	for (uint32_t i = 0; i < 4; i++)
		colors[i] = expand_565(colors[i]);

	// Left 4x4 half palette: color0, the two interpolants, color1.
	color_rgba block0_colors[4];
	block0_colors[0] = colors[0];
	block0_colors[1] = color_rgba((colors[0].r * 2 + colors[1].r + 1) / 3, (colors[0].g * 2 + colors[1].g + 1) / 3, (colors[0].b * 2 + colors[1].b + 1) / 3, 255);
	block0_colors[2] = color_rgba((colors[1].r * 2 + colors[0].r + 1) / 3, (colors[1].g * 2 + colors[0].g + 1) / 3, (colors[1].b * 2 + colors[0].b + 1) / 3, 255);
	block0_colors[3] = colors[1];

	for (uint32_t i = 0; i < 16; i++)
	{
		const uint32_t sel = (pBlock->m_sels[i >> 2] >> ((i & 3) * 2)) & 3;

		const uint32_t x = i & 3;
		const uint32_t y = i >> 2;
		pPixels[x + y * 8] = block0_colors[sel]; // Left half of the 8-wide row.
	}

	// Right 4x4 half palette: color2, the two interpolants, color3.
	color_rgba block1_colors[4];
	block1_colors[0] = colors[2];
	block1_colors[1] = color_rgba((colors[2].r * 2 + colors[3].r + 1) / 3, (colors[2].g * 2 + colors[3].g + 1) / 3, (colors[2].b * 2 + colors[3].b + 1) / 3, 255);
	block1_colors[2] = color_rgba((colors[3].r * 2 + colors[2].r + 1) / 3, (colors[3].g * 2 + colors[2].g + 1) / 3, (colors[3].b * 2 + colors[2].b + 1) / 3, 255);
	block1_colors[3] = colors[3];

	for (uint32_t i = 0; i < 16; i++)
	{
		const uint32_t sel = (pBlock->m_sels[4 + (i >> 2)] >> ((i & 3) * 2)) & 3;

		const uint32_t x = i & 3;
		const uint32_t y = i >> 2;
		pPixels[4 + x + y * 8] = block1_colors[sel]; // Right half of the 8-wide row.
	}

	return true;
}
// A 64-bit PVRTC2 4bpp block: 32 bits of modulation data followed by 32 bits
// of color data whose layout depends on m_opaque_flag (the top bit).
struct pvrtc2_block
{
	uint8_t m_modulation[4]; // 16 x 2-bit modulation selectors, 4 per byte.

	union
	{
		union
		{
			// Opaque mode: RGB colora=554 and colorb=555
			struct
			{
				uint32_t m_mod_flag : 1;
				uint32_t m_blue_a : 4;
				uint32_t m_green_a : 5;
				uint32_t m_red_a : 5;
				uint32_t m_hard_flag : 1;
				uint32_t m_blue_b : 5;
				uint32_t m_green_b : 5;
				uint32_t m_red_b : 5;
				uint32_t m_opaque_flag : 1;
			} m_opaque_color_data;

			// Transparent mode: RGBA colora=4433 and colorb=4443
			struct
			{
				uint32_t m_mod_flag : 1;
				uint32_t m_blue_a : 3;
				uint32_t m_green_a : 4;
				uint32_t m_red_a : 4;
				uint32_t m_alpha_a : 3;
				uint32_t m_hard_flag : 1;
				uint32_t m_blue_b : 4;
				uint32_t m_green_b : 4;
				uint32_t m_red_b : 4;
				uint32_t m_alpha_b : 3;
				uint32_t m_opaque_flag : 1;
			} m_trans_color_data;
		};

		uint32_t m_color_data_bits; // The same 32 color-data bits as one value.
	};
};
  781. static color_rgba convert_rgb_555_to_888(const color_rgba& col)
  782. {
  783. return color_rgba((col[0] << 3) | (col[0] >> 2), (col[1] << 3) | (col[1] >> 2), (col[2] << 3) | (col[2] >> 2), 255);
  784. }
  785. static color_rgba convert_rgba_5554_to_8888(const color_rgba& col)
  786. {
  787. return color_rgba((col[0] << 3) | (col[0] >> 2), (col[1] << 3) | (col[1] >> 2), (col[2] << 3) | (col[2] >> 2), (col[3] << 4) | col[3]);
  788. }
  789. // PVRTC2 is currently limited to only what our transcoder outputs (non-interpolated, hard_flag=1 modulation=0). In this mode, PVRTC2 looks much like BC1/ATC.
  790. bool unpack_pvrtc2(const void *p, color_rgba *pPixels)
  791. {
  792. const pvrtc2_block* pBlock = static_cast<const pvrtc2_block*>(p);
  793. if ((!pBlock->m_opaque_color_data.m_hard_flag) || (pBlock->m_opaque_color_data.m_mod_flag))
  794. {
  795. // This mode isn't supported by the transcoder, so we aren't bothering with it here.
  796. return false;
  797. }
  798. color_rgba colors[4];
  799. if (pBlock->m_opaque_color_data.m_opaque_flag)
  800. {
  801. // colora=554
  802. color_rgba color_a(pBlock->m_opaque_color_data.m_red_a, pBlock->m_opaque_color_data.m_green_a, (pBlock->m_opaque_color_data.m_blue_a << 1) | (pBlock->m_opaque_color_data.m_blue_a >> 3), 255);
  803. // colora=555
  804. color_rgba color_b(pBlock->m_opaque_color_data.m_red_b, pBlock->m_opaque_color_data.m_green_b, pBlock->m_opaque_color_data.m_blue_b, 255);
  805. colors[0] = convert_rgb_555_to_888(color_a);
  806. colors[3] = convert_rgb_555_to_888(color_b);
  807. colors[1].set((colors[0].r * 5 + colors[3].r * 3) / 8, (colors[0].g * 5 + colors[3].g * 3) / 8, (colors[0].b * 5 + colors[3].b * 3) / 8, 255);
  808. colors[2].set((colors[0].r * 3 + colors[3].r * 5) / 8, (colors[0].g * 3 + colors[3].g * 5) / 8, (colors[0].b * 3 + colors[3].b * 5) / 8, 255);
  809. }
  810. else
  811. {
  812. // colora=4433
  813. color_rgba color_a(
  814. (pBlock->m_trans_color_data.m_red_a << 1) | (pBlock->m_trans_color_data.m_red_a >> 3),
  815. (pBlock->m_trans_color_data.m_green_a << 1) | (pBlock->m_trans_color_data.m_green_a >> 3),
  816. (pBlock->m_trans_color_data.m_blue_a << 2) | (pBlock->m_trans_color_data.m_blue_a >> 1),
  817. pBlock->m_trans_color_data.m_alpha_a << 1);
  818. //colorb=4443
  819. color_rgba color_b(
  820. (pBlock->m_trans_color_data.m_red_b << 1) | (pBlock->m_trans_color_data.m_red_b >> 3),
  821. (pBlock->m_trans_color_data.m_green_b << 1) | (pBlock->m_trans_color_data.m_green_b >> 3),
  822. (pBlock->m_trans_color_data.m_blue_b << 1) | (pBlock->m_trans_color_data.m_blue_b >> 3),
  823. (pBlock->m_trans_color_data.m_alpha_b << 1) | 1);
  824. colors[0] = convert_rgba_5554_to_8888(color_a);
  825. colors[3] = convert_rgba_5554_to_8888(color_b);
  826. }
  827. colors[1].set((colors[0].r * 5 + colors[3].r * 3) / 8, (colors[0].g * 5 + colors[3].g * 3) / 8, (colors[0].b * 5 + colors[3].b * 3) / 8, (colors[0].a * 5 + colors[3].a * 3) / 8);
  828. colors[2].set((colors[0].r * 3 + colors[3].r * 5) / 8, (colors[0].g * 3 + colors[3].g * 5) / 8, (colors[0].b * 3 + colors[3].b * 5) / 8, (colors[0].a * 3 + colors[3].a * 5) / 8);
  829. for (uint32_t i = 0; i < 16; i++)
  830. {
  831. const uint32_t sel = (pBlock->m_modulation[i >> 2] >> ((i & 3) * 2)) & 3;
  832. pPixels[i] = colors[sel];
  833. }
  834. return true;
  835. }
  836. struct etc2_eac_r11
  837. {
  838. uint64_t m_base : 8;
  839. uint64_t m_table : 4;
  840. uint64_t m_mul : 4;
  841. uint64_t m_sels_0 : 8;
  842. uint64_t m_sels_1 : 8;
  843. uint64_t m_sels_2 : 8;
  844. uint64_t m_sels_3 : 8;
  845. uint64_t m_sels_4 : 8;
  846. uint64_t m_sels_5 : 8;
  847. uint64_t get_sels() const
  848. {
  849. return ((uint64_t)m_sels_0 << 40U) | ((uint64_t)m_sels_1 << 32U) | ((uint64_t)m_sels_2 << 24U) | ((uint64_t)m_sels_3 << 16U) | ((uint64_t)m_sels_4 << 8U) | m_sels_5;
  850. }
  851. void set_sels(uint64_t v)
  852. {
  853. m_sels_0 = (v >> 40U) & 0xFF;
  854. m_sels_1 = (v >> 32U) & 0xFF;
  855. m_sels_2 = (v >> 24U) & 0xFF;
  856. m_sels_3 = (v >> 16U) & 0xFF;
  857. m_sels_4 = (v >> 8U) & 0xFF;
  858. m_sels_5 = v & 0xFF;
  859. }
  860. };
// An ETC2 EAC RG11 block: two independent R11 blocks, one per channel
// (m_c[0] = red, m_c[1] = green; see unpack_etc2_eac_rg).
struct etc2_eac_rg11
{
	etc2_eac_r11 m_c[2];
};
  865. void unpack_etc2_eac_r(const void *p, color_rgba* pPixels, uint32_t c)
  866. {
  867. const etc2_eac_r11* pBlock = static_cast<const etc2_eac_r11*>(p);
  868. const uint64_t sels = pBlock->get_sels();
  869. const int base = (int)pBlock->m_base * 8 + 4;
  870. const int mul = pBlock->m_mul ? ((int)pBlock->m_mul * 8) : 1;
  871. const int table = (int)pBlock->m_table;
  872. for (uint32_t y = 0; y < 4; y++)
  873. {
  874. for (uint32_t x = 0; x < 4; x++)
  875. {
  876. const uint32_t shift = 45 - ((y + x * 4) * 3);
  877. const uint32_t sel = (uint32_t)((sels >> shift) & 7);
  878. int val = base + g_etc2_eac_tables[table][sel] * mul;
  879. val = clamp<int>(val, 0, 2047);
  880. // Convert to 8-bits with rounding
  881. //pPixels[x + y * 4].m_comps[c] = static_cast<uint8_t>((val * 255 + 1024) / 2047);
  882. pPixels[x + y * 4].m_comps[c] = static_cast<uint8_t>((val * 255 + 1023) / 2047);
  883. } // x
  884. } // y
  885. }
  886. void unpack_etc2_eac_rg(const void* p, color_rgba* pPixels)
  887. {
  888. for (uint32_t c = 0; c < 2; c++)
  889. {
  890. const etc2_eac_r11* pBlock = &static_cast<const etc2_eac_rg11*>(p)->m_c[c];
  891. unpack_etc2_eac_r(pBlock, pPixels, c);
  892. }
  893. }
// Decodes a UASTC 4x4 block to 16 RGBA pixels by delegating to the
// transcoder's reference UASTC unpacker (last arg false - presumably
// non-sRGB/unorm decode; see basist::unpack_uastc for the exact meaning).
void unpack_uastc(const void* p, color_rgba* pPixels)
{
	basist::unpack_uastc(*static_cast<const basist::uastc_block*>(p), (basist::color32 *)pPixels, false);
}
  898. // Unpacks to RGBA, R, RG, or A
  899. bool unpack_block(texture_format fmt, const void* pBlock, color_rgba* pPixels)
  900. {
  901. switch (fmt)
  902. {
  903. case texture_format::cBC1:
  904. {
  905. unpack_bc1(pBlock, pPixels, true);
  906. break;
  907. }
  908. case texture_format::cBC1_NV:
  909. {
  910. unpack_bc1_nv(pBlock, pPixels, true);
  911. break;
  912. }
  913. case texture_format::cBC1_AMD:
  914. {
  915. unpack_bc1_amd(pBlock, pPixels, true);
  916. break;
  917. }
  918. case texture_format::cBC3:
  919. {
  920. return unpack_bc3(pBlock, pPixels);
  921. }
  922. case texture_format::cBC4:
  923. {
  924. // Unpack to R
  925. unpack_bc4(pBlock, &pPixels[0].r, sizeof(color_rgba));
  926. break;
  927. }
  928. case texture_format::cBC5:
  929. {
  930. unpack_bc5(pBlock, pPixels);
  931. break;
  932. }
  933. case texture_format::cBC7:
  934. {
  935. return unpack_bc7(pBlock, pPixels);
  936. }
  937. // Full ETC2 color blocks (planar/T/H modes) is currently unsupported in basisu, but we do support ETC2 with alpha (using ETC1 for color)
  938. case texture_format::cETC2_RGB:
  939. case texture_format::cETC1:
  940. case texture_format::cETC1S:
  941. {
  942. return unpack_etc1(*static_cast<const etc_block*>(pBlock), pPixels);
  943. }
  944. case texture_format::cETC2_RGBA:
  945. {
  946. if (!unpack_etc1(static_cast<const etc_block*>(pBlock)[1], pPixels))
  947. return false;
  948. unpack_etc2_eac(pBlock, pPixels);
  949. break;
  950. }
  951. case texture_format::cETC2_ALPHA:
  952. {
  953. // Unpack to A
  954. unpack_etc2_eac(pBlock, pPixels);
  955. break;
  956. }
  957. case texture_format::cASTC4x4:
  958. {
  959. const bool astc_srgb = false;
  960. basisu_astc::astc::decompress(reinterpret_cast<uint8_t*>(pPixels), static_cast<const uint8_t*>(pBlock), astc_srgb, 4, 4);
  961. break;
  962. }
  963. case texture_format::cATC_RGB:
  964. {
  965. unpack_atc(pBlock, pPixels);
  966. break;
  967. }
  968. case texture_format::cATC_RGBA_INTERPOLATED_ALPHA:
  969. {
  970. unpack_atc(static_cast<const uint8_t*>(pBlock) + 8, pPixels);
  971. unpack_bc4(pBlock, &pPixels[0].a, sizeof(color_rgba));
  972. break;
  973. }
  974. case texture_format::cFXT1_RGB:
  975. {
  976. unpack_fxt1(pBlock, pPixels);
  977. break;
  978. }
  979. case texture_format::cPVRTC2_4_RGBA:
  980. {
  981. unpack_pvrtc2(pBlock, pPixels);
  982. break;
  983. }
  984. case texture_format::cETC2_R11_EAC:
  985. {
  986. unpack_etc2_eac_r(static_cast<const etc2_eac_r11 *>(pBlock), pPixels, 0);
  987. break;
  988. }
  989. case texture_format::cETC2_RG11_EAC:
  990. {
  991. unpack_etc2_eac_rg(pBlock, pPixels);
  992. break;
  993. }
  994. case texture_format::cUASTC4x4:
  995. {
  996. unpack_uastc(pBlock, pPixels);
  997. break;
  998. }
  999. default:
  1000. {
  1001. assert(0);
  1002. // TODO
  1003. return false;
  1004. }
  1005. }
  1006. return true;
  1007. }
  1008. bool gpu_image::unpack(image& img) const
  1009. {
  1010. img.resize(get_pixel_width(), get_pixel_height());
  1011. img.set_all(g_black_color);
  1012. if (!img.get_width() || !img.get_height())
  1013. return true;
  1014. if ((m_fmt == texture_format::cPVRTC1_4_RGB) || (m_fmt == texture_format::cPVRTC1_4_RGBA))
  1015. {
  1016. pvrtc4_image pi(m_width, m_height);
  1017. if (get_total_blocks() != pi.get_total_blocks())
  1018. return false;
  1019. memcpy(&pi.get_blocks()[0], get_ptr(), get_size_in_bytes());
  1020. pi.deswizzle();
  1021. pi.unpack_all_pixels(img);
  1022. return true;
  1023. }
  1024. assert((m_block_width <= cMaxBlockSize) && (m_block_height <= cMaxBlockSize));
  1025. color_rgba pixels[cMaxBlockSize * cMaxBlockSize];
  1026. for (uint32_t i = 0; i < cMaxBlockSize * cMaxBlockSize; i++)
  1027. pixels[i] = g_black_color;
  1028. bool success = true;
  1029. for (uint32_t by = 0; by < m_blocks_y; by++)
  1030. {
  1031. for (uint32_t bx = 0; bx < m_blocks_x; bx++)
  1032. {
  1033. const void* pBlock = get_block_ptr(bx, by);
  1034. if (!unpack_block(m_fmt, pBlock, pixels))
  1035. success = false;
  1036. img.set_block_clipped(pixels, bx * m_block_width, by * m_block_height, m_block_width, m_block_height);
  1037. } // bx
  1038. } // by
  1039. return success;
  1040. }
// The canonical 12-byte identifier that begins every KTX v1 file.
static const uint8_t g_ktx_file_id[12] = { 0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A };

// KTX/GL enums
enum
{
	KTX_ENDIAN = 0x04030201,          // Endianness marker written into the header.
	KTX_OPPOSITE_ENDIAN = 0x01020304, // What a reader sees for a foreign-endian file.

	KTX_ETC1_RGB8_OES = 0x8D64,

	// GL base internal formats.
	KTX_RED = 0x1903,
	KTX_RG = 0x8227,
	KTX_RGB = 0x1907,
	KTX_RGBA = 0x1908,

	// GL compressed internal format enums.
	KTX_COMPRESSED_RGB_S3TC_DXT1_EXT = 0x83F0,
	KTX_COMPRESSED_RGBA_S3TC_DXT5_EXT = 0x83F3,
	KTX_COMPRESSED_RED_RGTC1_EXT = 0x8DBB,
	KTX_COMPRESSED_RED_GREEN_RGTC2_EXT = 0x8DBD,
	KTX_COMPRESSED_RGB8_ETC2 = 0x9274,
	KTX_COMPRESSED_RGBA8_ETC2_EAC = 0x9278,
	KTX_COMPRESSED_RGBA_BPTC_UNORM = 0x8E8C,
	KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM = 0x8E8D,
	KTX_COMPRESSED_RGB_PVRTC_4BPPV1_IMG = 0x8C00,
	KTX_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG = 0x8C02,
	KTX_COMPRESSED_RGBA_ASTC_4x4_KHR = 0x93B0,
	KTX_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR = 0x93D0,
	KTX_COMPRESSED_RGBA_UASTC_4x4_KHR = 0x94CC, // TODO - Use proper value!
	KTX_ATC_RGB_AMD = 0x8C92,
	KTX_ATC_RGBA_INTERPOLATED_ALPHA_AMD = 0x87EE,
	KTX_COMPRESSED_RGB_FXT1_3DFX = 0x86B0,
	KTX_COMPRESSED_RGBA_FXT1_3DFX = 0x86B1,
	KTX_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG = 0x9138,
	KTX_COMPRESSED_R11_EAC = 0x9270,
	KTX_COMPRESSED_RG11_EAC = 0x9272
};
// The KTX v1 file header. Fields are packed_uint so the struct can be
// memcpy'd to the output buffer directly (see create_ktx_texture_file).
struct ktx_header
{
	uint8_t m_identifier[12];                 // Must equal g_ktx_file_id.
	packed_uint<4> m_endianness;              // KTX_ENDIAN as written by this machine.
	packed_uint<4> m_glType;                  // Left 0 by create_ktx_texture_file (compressed data).
	packed_uint<4> m_glTypeSize;              // Left 0 likewise.
	packed_uint<4> m_glFormat;                // Left 0 likewise.
	packed_uint<4> m_glInternalFormat;        // A KTX_COMPRESSED_* / KTX_ETC1_RGB8_OES enum.
	packed_uint<4> m_glBaseInternalFormat;    // KTX_RED / KTX_RG / KTX_RGB / KTX_RGBA.
	packed_uint<4> m_pixelWidth;
	packed_uint<4> m_pixelHeight;
	packed_uint<4> m_pixelDepth;              // Left 0 (2D textures only here).
	packed_uint<4> m_numberOfArrayElements;   // 0 when not an array texture.
	packed_uint<4> m_numberOfFaces;           // 6 for cubemaps, else 1.
	packed_uint<4> m_numberOfMipmapLevels;
	packed_uint<4> m_bytesOfKeyValueData;     // Left 0 - no key/value data emitted.
	// Zeroes the whole header.
	void clear() { clear_obj(*this); }
};
// Input is a texture array of mipmapped gpu_image's: gpu_images[array_index][level_index]
// Serializes the images into a KTX v1 file appended to ktx_data. All array
// entries must share the same dimensions, mip level count and format.
// Returns false (after asserting) on malformed input or an unmapped format.
bool create_ktx_texture_file(uint8_vec &ktx_data, const basisu::vector<gpu_image_vec>& gpu_images, bool cubemap_flag)
{
	if (!gpu_images.size())
	{
		assert(0);
		return false;
	}

	uint32_t width = 0, height = 0, total_levels = 0;
	basisu::texture_format fmt = texture_format::cInvalidTextureFormat;

	// Cubemap input must supply faces in groups of 6.
	if (cubemap_flag)
	{
		if ((gpu_images.size() % 6) != 0)
		{
			assert(0);
			return false;
		}
	}

	// Validate every array entry: consistent size/level-count/format, and a
	// properly halving mip chain.
	for (uint32_t array_index = 0; array_index < gpu_images.size(); array_index++)
	{
		const gpu_image_vec &levels = gpu_images[array_index];

		if (!levels.size())
		{
			// Empty mip chain
			assert(0);
			return false;
		}

		if (!array_index)
		{
			// The first entry establishes the reference dimensions/format.
			width = levels[0].get_pixel_width();
			height = levels[0].get_pixel_height();
			total_levels = (uint32_t)levels.size();
			fmt = levels[0].get_format();
		}
		else
		{
			if ((width != levels[0].get_pixel_width()) ||
				(height != levels[0].get_pixel_height()) ||
				(total_levels != levels.size()))
			{
				// All cubemap/texture array faces must be the same dimension
				assert(0);
				return false;
			}
		}

		for (uint32_t level_index = 0; level_index < levels.size(); level_index++)
		{
			if (level_index)
			{
				// Each mip must be exactly half the base (clamped to 1 pixel).
				if ( (levels[level_index].get_pixel_width() != maximum<uint32_t>(1, levels[0].get_pixel_width() >> level_index)) ||
					(levels[level_index].get_pixel_height() != maximum<uint32_t>(1, levels[0].get_pixel_height() >> level_index)) )
				{
					// Malformed mipmap chain
					assert(0);
					return false;
				}
			}

			if (fmt != levels[level_index].get_format())
			{
				// All input textures must use the same GPU format
				assert(0);
				return false;
			}
		}
	}

	// Map the internal texture format to the GL internal/base-format enums
	// stored in the KTX header.
	uint32_t internal_fmt = KTX_ETC1_RGB8_OES, base_internal_fmt = KTX_RGB;

	switch (fmt)
	{
	case texture_format::cBC1:
	case texture_format::cBC1_NV:
	case texture_format::cBC1_AMD:
	{
		internal_fmt = KTX_COMPRESSED_RGB_S3TC_DXT1_EXT;
		break;
	}
	case texture_format::cBC3:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_S3TC_DXT5_EXT;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cBC4:
	{
		internal_fmt = KTX_COMPRESSED_RED_RGTC1_EXT;// KTX_COMPRESSED_LUMINANCE_LATC1_EXT;
		base_internal_fmt = KTX_RED;
		break;
	}
	case texture_format::cBC5:
	{
		internal_fmt = KTX_COMPRESSED_RED_GREEN_RGTC2_EXT;
		base_internal_fmt = KTX_RG;
		break;
	}
	case texture_format::cETC1:
	case texture_format::cETC1S:
	{
		internal_fmt = KTX_ETC1_RGB8_OES;
		break;
	}
	case texture_format::cETC2_RGB:
	{
		internal_fmt = KTX_COMPRESSED_RGB8_ETC2;
		break;
	}
	case texture_format::cETC2_RGBA:
	{
		internal_fmt = KTX_COMPRESSED_RGBA8_ETC2_EAC;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cBC7:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_BPTC_UNORM;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cPVRTC1_4_RGB:
	{
		internal_fmt = KTX_COMPRESSED_RGB_PVRTC_4BPPV1_IMG;
		break;
	}
	case texture_format::cPVRTC1_4_RGBA:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cASTC4x4:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_ASTC_4x4_KHR;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cATC_RGB:
	{
		internal_fmt = KTX_ATC_RGB_AMD;
		break;
	}
	case texture_format::cATC_RGBA_INTERPOLATED_ALPHA:
	{
		internal_fmt = KTX_ATC_RGBA_INTERPOLATED_ALPHA_AMD;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cETC2_R11_EAC:
	{
		internal_fmt = KTX_COMPRESSED_R11_EAC;
		base_internal_fmt = KTX_RED;
		break;
	}
	case texture_format::cETC2_RG11_EAC:
	{
		internal_fmt = KTX_COMPRESSED_RG11_EAC;
		base_internal_fmt = KTX_RG;
		break;
	}
	case texture_format::cUASTC4x4:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_UASTC_4x4_KHR;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cFXT1_RGB:
	{
		internal_fmt = KTX_COMPRESSED_RGB_FXT1_3DFX;
		break;
	}
	case texture_format::cPVRTC2_4_RGBA:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	default:
	{
		// TODO
		assert(0);
		return false;
	}
	}

	// Fill out the header. glType/glTypeSize/glFormat stay 0 (compressed data),
	// as cleared by header.clear().
	ktx_header header;
	header.clear();
	memcpy(&header.m_identifier, g_ktx_file_id, sizeof(g_ktx_file_id));
	header.m_endianness = KTX_ENDIAN;
	header.m_pixelWidth = width;
	header.m_pixelHeight = height;
	header.m_glInternalFormat = internal_fmt;
	header.m_glBaseInternalFormat = base_internal_fmt;
	// Per KTX v1, numberOfArrayElements is 0 for non-array textures.
	header.m_numberOfArrayElements = (uint32_t)(cubemap_flag ? (gpu_images.size() / 6) : gpu_images.size());
	if (header.m_numberOfArrayElements == 1)
		header.m_numberOfArrayElements = 0;
	header.m_numberOfMipmapLevels = total_levels;
	header.m_numberOfFaces = cubemap_flag ? 6 : 1;

	append_vector(ktx_data, (uint8_t *)&header, sizeof(header));

	// Emit each mip level: a 4-byte imageSize, then the data for every
	// array element/face at that level.
	for (uint32_t level_index = 0; level_index < total_levels; level_index++)
	{
		uint32_t img_size = gpu_images[0][level_index].get_size_in_bytes();

		// Per the KTX v1 spec, imageSize covers all faces/array elements EXCEPT
		// for non-array cubemaps, where it's the size of a single face.
		if ((header.m_numberOfFaces == 1) || (header.m_numberOfArrayElements > 1))
		{
			img_size = img_size * header.m_numberOfFaces * maximum<uint32_t>(1, header.m_numberOfArrayElements);
		}

		// Block-compressed data is a multiple of 4 bytes, so no mip padding is written.
		assert(img_size && ((img_size & 3) == 0));

		packed_uint<4> packed_img_size(img_size);
		append_vector(ktx_data, (uint8_t *)&packed_img_size, sizeof(packed_img_size));

		uint32_t bytes_written = 0; // NOTE(review): accumulated but never read - candidate for removal or a sanity assert.

		for (uint32_t array_index = 0; array_index < maximum<uint32_t>(1, header.m_numberOfArrayElements); array_index++)
		{
			for (uint32_t face_index = 0; face_index < header.m_numberOfFaces; face_index++)
			{
				const gpu_image& img = gpu_images[cubemap_flag ? (array_index * 6 + face_index) : array_index][level_index];

				append_vector(ktx_data, (uint8_t *)img.get_ptr(), img.get_size_in_bytes());

				bytes_written += img.get_size_in_bytes();
			}
		} // array_index

	} // level_index

	return true;
}
  1308. bool write_compressed_texture_file(const char* pFilename, const basisu::vector<gpu_image_vec>& g, bool cubemap_flag)
  1309. {
  1310. std::string extension(string_tolower(string_get_extension(pFilename)));
  1311. uint8_vec filedata;
  1312. if (extension == "ktx")
  1313. {
  1314. if (!create_ktx_texture_file(filedata, g, cubemap_flag))
  1315. return false;
  1316. }
  1317. else if (extension == "pvr")
  1318. {
  1319. // TODO
  1320. return false;
  1321. }
  1322. else if (extension == "dds")
  1323. {
  1324. // TODO
  1325. return false;
  1326. }
  1327. else
  1328. {
  1329. // unsupported texture format
  1330. assert(0);
  1331. return false;
  1332. }
  1333. return basisu::write_vec_to_file(pFilename, filedata);
  1334. }
  1335. bool write_compressed_texture_file(const char* pFilename, const gpu_image& g)
  1336. {
  1337. basisu::vector<gpu_image_vec> v;
  1338. enlarge_vector(v, 1)->push_back(g);
  1339. return write_compressed_texture_file(pFilename, v, false);
  1340. }
//const uint32_t OUT_FILE_MAGIC = 'TEXC';
// Header of a 3DFX .OUT texture file: a 4-byte magic (the bytes of 'TEXC',
// written byte-wise in write_3dfx_out_file), 4 bytes of padding, then the
// pixel width and height. Fields are packed_uint so the struct is unpadded.
struct out_file_header
{
	packed_uint<4> m_magic;
	packed_uint<4> m_pad;
	packed_uint<4> m_width;
	packed_uint<4> m_height;
};
  1349. // As no modern tool supports FXT1 format .KTX files, let's write .OUT files and make sure 3DFX's original tools shipped in 1999 can decode our encoded output.
  1350. bool write_3dfx_out_file(const char* pFilename, const gpu_image& gi)
  1351. {
  1352. out_file_header hdr;
  1353. //hdr.m_magic = OUT_FILE_MAGIC;
  1354. hdr.m_magic.m_bytes[0] = 67;
  1355. hdr.m_magic.m_bytes[1] = 88;
  1356. hdr.m_magic.m_bytes[2] = 69;
  1357. hdr.m_magic.m_bytes[3] = 84;
  1358. hdr.m_pad = 0;
  1359. hdr.m_width = gi.get_blocks_x() * 8;
  1360. hdr.m_height = gi.get_blocks_y() * 4;
  1361. FILE* pFile = nullptr;
  1362. #ifdef _WIN32
  1363. fopen_s(&pFile, pFilename, "wb");
  1364. #else
  1365. pFile = fopen(pFilename, "wb");
  1366. #endif
  1367. if (!pFile)
  1368. return false;
  1369. fwrite(&hdr, sizeof(hdr), 1, pFile);
  1370. fwrite(gi.get_ptr(), gi.get_size_in_bytes(), 1, pFile);
  1371. return fclose(pFile) != EOF;
  1372. }
  1373. } // basisu