basisu_gpu_texture.cpp
  1. // basisu_gpu_texture.cpp
  2. // Copyright (C) 2019-2024 Binomial LLC. All Rights Reserved.
  3. //
  4. // Licensed under the Apache License, Version 2.0 (the "License");
  5. // you may not use this file except in compliance with the License.
  6. // You may obtain a copy of the License at
  7. //
  8. // http://www.apache.org/licenses/LICENSE-2.0
  9. //
  10. // Unless required by applicable law or agreed to in writing, software
  11. // distributed under the License is distributed on an "AS IS" BASIS,
  12. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. // See the License for the specific language governing permissions and
  14. // limitations under the License.
  15. #include "basisu_gpu_texture.h"
  16. #include "basisu_enc.h"
  17. #include "basisu_pvrtc1_4.h"
  18. #include "3rdparty/android_astc_decomp.h"
  19. #include "basisu_bc7enc.h"
  20. #include "../transcoder/basisu_astc_hdr_core.h"
  21. namespace basisu
  22. {
  23. //------------------------------------------------------------------------------------------------
  24. // ETC2 EAC
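// An EAC A8 block is 8 bytes: an 8-bit base value, a 4-bit multiplier, a 4-bit table index,
// and sixteen 3-bit selectors. Each alpha value is clamp255(base + table[selector] * multiplier).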
  25. void unpack_etc2_eac(const void *pBlock_bits, color_rgba *pPixels)
  26. {
  27. static_assert(sizeof(eac_a8_block) == 8, "sizeof(eac_a8_block) == 8");
  28. const eac_a8_block *pBlock = static_cast<const eac_a8_block *>(pBlock_bits);
  29. const int8_t *pTable = g_etc2_eac_tables[pBlock->m_table];
  30. const uint64_t selector_bits = pBlock->get_selector_bits();
  31. const int32_t base = pBlock->m_base;
  32. const int32_t mul = pBlock->m_multiplier;
  33. pPixels[0].a = clamp255(base + pTable[pBlock->get_selector(0, 0, selector_bits)] * mul);
  34. pPixels[1].a = clamp255(base + pTable[pBlock->get_selector(1, 0, selector_bits)] * mul);
  35. pPixels[2].a = clamp255(base + pTable[pBlock->get_selector(2, 0, selector_bits)] * mul);
  36. pPixels[3].a = clamp255(base + pTable[pBlock->get_selector(3, 0, selector_bits)] * mul);
  37. pPixels[4].a = clamp255(base + pTable[pBlock->get_selector(0, 1, selector_bits)] * mul);
  38. pPixels[5].a = clamp255(base + pTable[pBlock->get_selector(1, 1, selector_bits)] * mul);
  39. pPixels[6].a = clamp255(base + pTable[pBlock->get_selector(2, 1, selector_bits)] * mul);
  40. pPixels[7].a = clamp255(base + pTable[pBlock->get_selector(3, 1, selector_bits)] * mul);
  41. pPixels[8].a = clamp255(base + pTable[pBlock->get_selector(0, 2, selector_bits)] * mul);
  42. pPixels[9].a = clamp255(base + pTable[pBlock->get_selector(1, 2, selector_bits)] * mul);
  43. pPixels[10].a = clamp255(base + pTable[pBlock->get_selector(2, 2, selector_bits)] * mul);
  44. pPixels[11].a = clamp255(base + pTable[pBlock->get_selector(3, 2, selector_bits)] * mul);
  45. pPixels[12].a = clamp255(base + pTable[pBlock->get_selector(0, 3, selector_bits)] * mul);
  46. pPixels[13].a = clamp255(base + pTable[pBlock->get_selector(1, 3, selector_bits)] * mul);
  47. pPixels[14].a = clamp255(base + pTable[pBlock->get_selector(2, 3, selector_bits)] * mul);
  48. pPixels[15].a = clamp255(base + pTable[pBlock->get_selector(3, 3, selector_bits)] * mul);
  49. }
  50. //------------------------------------------------------------------------------------------------
  51. // BC1
  52. struct bc1_block
  53. {
  54. enum { cTotalEndpointBytes = 2, cTotalSelectorBytes = 4 };
  55. uint8_t m_low_color[cTotalEndpointBytes];
  56. uint8_t m_high_color[cTotalEndpointBytes];
  57. uint8_t m_selectors[cTotalSelectorBytes];
  58. inline uint32_t get_high_color() const { return m_high_color[0] | (m_high_color[1] << 8U); }
  59. inline uint32_t get_low_color() const { return m_low_color[0] | (m_low_color[1] << 8U); }
  60. static void unpack_color(uint32_t c, uint32_t &r, uint32_t &g, uint32_t &b)
  61. {
  62. r = (c >> 11) & 31;
  63. g = (c >> 5) & 63;
  64. b = c & 31;
  65. r = (r << 3) | (r >> 2);
  66. g = (g << 2) | (g >> 4);
  67. b = (b << 3) | (b >> 2);
  68. }
  69. inline uint32_t get_selector(uint32_t x, uint32_t y) const { assert((x < 4U) && (y < 4U)); return (m_selectors[y] >> (x * 2)) & 3; }
  70. };
  71. // Returns true if the block uses 3-color punchthrough alpha mode.
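// BC1 mode selection: if the first 565 endpoint is numerically greater than the second, the block is in
// 4-color mode (two interpolated colors at 1/3 and 2/3); otherwise it's in 3-color mode, where the third
// color is the midpoint of the endpoints and the fourth is transparent black (punchthrough).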
  72. bool unpack_bc1(const void *pBlock_bits, color_rgba *pPixels, bool set_alpha)
  73. {
  74. static_assert(sizeof(bc1_block) == 8, "sizeof(bc1_block) == 8");
  75. const bc1_block *pBlock = static_cast<const bc1_block *>(pBlock_bits);
  76. const uint32_t l = pBlock->get_low_color();
  77. const uint32_t h = pBlock->get_high_color();
  78. color_rgba c[4];
  79. uint32_t r0, g0, b0, r1, g1, b1;
  80. bc1_block::unpack_color(l, r0, g0, b0);
  81. bc1_block::unpack_color(h, r1, g1, b1);
  82. c[0].set_noclamp_rgba(r0, g0, b0, 255);
  83. c[1].set_noclamp_rgba(r1, g1, b1, 255);
  84. bool used_punchthrough = false;
  85. if (l > h)
  86. {
  87. c[2].set_noclamp_rgba((r0 * 2 + r1) / 3, (g0 * 2 + g1) / 3, (b0 * 2 + b1) / 3, 255);
  88. c[3].set_noclamp_rgba((r1 * 2 + r0) / 3, (g1 * 2 + g0) / 3, (b1 * 2 + b0) / 3, 255);
  89. }
  90. else
  91. {
  92. c[2].set_noclamp_rgba((r0 + r1) / 2, (g0 + g1) / 2, (b0 + b1) / 2, 255);
  93. c[3].set_noclamp_rgba(0, 0, 0, 0);
  94. used_punchthrough = true;
  95. }
  96. if (set_alpha)
  97. {
  98. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  99. {
  100. pPixels[0] = c[pBlock->get_selector(0, y)];
  101. pPixels[1] = c[pBlock->get_selector(1, y)];
  102. pPixels[2] = c[pBlock->get_selector(2, y)];
  103. pPixels[3] = c[pBlock->get_selector(3, y)];
  104. }
  105. }
  106. else
  107. {
  108. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  109. {
  110. pPixels[0].set_rgb(c[pBlock->get_selector(0, y)]);
  111. pPixels[1].set_rgb(c[pBlock->get_selector(1, y)]);
  112. pPixels[2].set_rgb(c[pBlock->get_selector(2, y)]);
  113. pPixels[3].set_rgb(c[pBlock->get_selector(3, y)]);
  114. }
  115. }
  116. return used_punchthrough;
  117. }
  118. bool unpack_bc1_nv(const void *pBlock_bits, color_rgba *pPixels, bool set_alpha)
  119. {
  120. static_assert(sizeof(bc1_block) == 8, "sizeof(bc1_block) == 8");
  121. const bc1_block *pBlock = static_cast<const bc1_block *>(pBlock_bits);
  122. const uint32_t l = pBlock->get_low_color();
  123. const uint32_t h = pBlock->get_high_color();
  124. color_rgba c[4];
  125. int r0 = (l >> 11) & 31;
  126. int g0 = (l >> 5) & 63;
  127. int b0 = l & 31;
  128. int r1 = (h >> 11) & 31;
  129. int g1 = (h >> 5) & 63;
  130. int b1 = h & 31;
  131. c[0].b = (uint8_t)((3 * b0 * 22) / 8);
  132. c[0].g = (uint8_t)((g0 << 2) | (g0 >> 4));
  133. c[0].r = (uint8_t)((3 * r0 * 22) / 8);
  134. c[0].a = 0xFF;
  135. c[1].r = (uint8_t)((3 * r1 * 22) / 8);
  136. c[1].g = (uint8_t)((g1 << 2) | (g1 >> 4));
  137. c[1].b = (uint8_t)((3 * b1 * 22) / 8);
  138. c[1].a = 0xFF;
  139. int gdiff = c[1].g - c[0].g;
  140. bool used_punchthrough = false;
  141. if (l > h)
  142. {
  143. c[2].r = (uint8_t)(((2 * r0 + r1) * 22) / 8);
  144. c[2].g = (uint8_t)(((256 * c[0].g + gdiff/4 + 128 + gdiff * 80) / 256));
  145. c[2].b = (uint8_t)(((2 * b0 + b1) * 22) / 8);
  146. c[2].a = 0xFF;
  147. c[3].r = (uint8_t)(((2 * r1 + r0) * 22) / 8);
  148. c[3].g = (uint8_t)((256 * c[1].g - gdiff/4 + 128 - gdiff * 80) / 256);
  149. c[3].b = (uint8_t)(((2 * b1 + b0) * 22) / 8);
  150. c[3].a = 0xFF;
  151. }
  152. else
  153. {
  154. c[2].r = (uint8_t)(((r0 + r1) * 33) / 8);
  155. c[2].g = (uint8_t)((256 * c[0].g + gdiff/4 + 128 + gdiff * 128) / 256);
  156. c[2].b = (uint8_t)(((b0 + b1) * 33) / 8);
  157. c[2].a = 0xFF;
  158. c[3].set_noclamp_rgba(0, 0, 0, 0);
  159. used_punchthrough = true;
  160. }
  161. if (set_alpha)
  162. {
  163. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  164. {
  165. pPixels[0] = c[pBlock->get_selector(0, y)];
  166. pPixels[1] = c[pBlock->get_selector(1, y)];
  167. pPixels[2] = c[pBlock->get_selector(2, y)];
  168. pPixels[3] = c[pBlock->get_selector(3, y)];
  169. }
  170. }
  171. else
  172. {
  173. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  174. {
  175. pPixels[0].set_rgb(c[pBlock->get_selector(0, y)]);
  176. pPixels[1].set_rgb(c[pBlock->get_selector(1, y)]);
  177. pPixels[2].set_rgb(c[pBlock->get_selector(2, y)]);
  178. pPixels[3].set_rgb(c[pBlock->get_selector(3, y)]);
  179. }
  180. }
  181. return used_punchthrough;
  182. }
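// unpack_bc1_nv() and unpack_bc1_amd() approximate the slightly different interpolation/rounding used by
// NVIDIA and AMD hardware BC1 decoders, so decoded output can be compared against what those GPUs produce.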
  183. static inline int interp_5_6_amd(int c0, int c1) { assert(c0 < 256 && c1 < 256); return (c0 * 43 + c1 * 21 + 32) >> 6; }
  184. static inline int interp_half_5_6_amd(int c0, int c1) { assert(c0 < 256 && c1 < 256); return (c0 + c1 + 1) >> 1; }
  185. bool unpack_bc1_amd(const void *pBlock_bits, color_rgba *pPixels, bool set_alpha)
  186. {
  187. const bc1_block *pBlock = static_cast<const bc1_block *>(pBlock_bits);
  188. const uint32_t l = pBlock->get_low_color();
  189. const uint32_t h = pBlock->get_high_color();
  190. color_rgba c[4];
  191. uint32_t r0, g0, b0, r1, g1, b1;
  192. bc1_block::unpack_color(l, r0, g0, b0);
  193. bc1_block::unpack_color(h, r1, g1, b1);
  194. c[0].set_noclamp_rgba(r0, g0, b0, 255);
  195. c[1].set_noclamp_rgba(r1, g1, b1, 255);
  196. bool used_punchthrough = false;
  197. if (l > h)
  198. {
  199. c[2].set_noclamp_rgba(interp_5_6_amd(r0, r1), interp_5_6_amd(g0, g1), interp_5_6_amd(b0, b1), 255);
  200. c[3].set_noclamp_rgba(interp_5_6_amd(r1, r0), interp_5_6_amd(g1, g0), interp_5_6_amd(b1, b0), 255);
  201. }
  202. else
  203. {
  204. c[2].set_noclamp_rgba(interp_half_5_6_amd(r0, r1), interp_half_5_6_amd(g0, g1), interp_half_5_6_amd(b0, b1), 255);
  205. c[3].set_noclamp_rgba(0, 0, 0, 0);
  206. used_punchthrough = true;
  207. }
  208. if (set_alpha)
  209. {
  210. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  211. {
  212. pPixels[0] = c[pBlock->get_selector(0, y)];
  213. pPixels[1] = c[pBlock->get_selector(1, y)];
  214. pPixels[2] = c[pBlock->get_selector(2, y)];
  215. pPixels[3] = c[pBlock->get_selector(3, y)];
  216. }
  217. }
  218. else
  219. {
  220. for (uint32_t y = 0; y < 4; y++, pPixels += 4)
  221. {
  222. pPixels[0].set_rgb(c[pBlock->get_selector(0, y)]);
  223. pPixels[1].set_rgb(c[pBlock->get_selector(1, y)]);
  224. pPixels[2].set_rgb(c[pBlock->get_selector(2, y)]);
  225. pPixels[3].set_rgb(c[pBlock->get_selector(3, y)]);
  226. }
  227. }
  228. return used_punchthrough;
  229. }
  230. //------------------------------------------------------------------------------------------------
  231. // BC3-5
  232. struct bc4_block
  233. {
  234. enum { cBC4SelectorBits = 3, cTotalSelectorBytes = 6, cMaxSelectorValues = 8 };
  235. uint8_t m_endpoints[2];
  236. uint8_t m_selectors[cTotalSelectorBytes];
  237. inline uint32_t get_low_alpha() const { return m_endpoints[0]; }
  238. inline uint32_t get_high_alpha() const { return m_endpoints[1]; }
  239. inline bool is_alpha6_block() const { return get_low_alpha() <= get_high_alpha(); }
  240. inline uint64_t get_selector_bits() const
  241. {
  242. return ((uint64_t)((uint32_t)m_selectors[0] | ((uint32_t)m_selectors[1] << 8U) | ((uint32_t)m_selectors[2] << 16U) | ((uint32_t)m_selectors[3] << 24U))) |
  243. (((uint64_t)m_selectors[4]) << 32U) |
  244. (((uint64_t)m_selectors[5]) << 40U);
  245. }
  246. inline uint32_t get_selector(uint32_t x, uint32_t y, uint64_t selector_bits) const
  247. {
  248. assert((x < 4U) && (y < 4U));
  249. return (selector_bits >> (((y * 4) + x) * cBC4SelectorBits)) & (cMaxSelectorValues - 1);
  250. }
  251. static inline uint32_t get_block_values6(uint8_t *pDst, uint32_t l, uint32_t h)
  252. {
  253. pDst[0] = static_cast<uint8_t>(l);
  254. pDst[1] = static_cast<uint8_t>(h);
  255. pDst[2] = static_cast<uint8_t>((l * 4 + h) / 5);
  256. pDst[3] = static_cast<uint8_t>((l * 3 + h * 2) / 5);
  257. pDst[4] = static_cast<uint8_t>((l * 2 + h * 3) / 5);
  258. pDst[5] = static_cast<uint8_t>((l + h * 4) / 5);
  259. pDst[6] = 0;
  260. pDst[7] = 255;
  261. return 6;
  262. }
  263. static inline uint32_t get_block_values8(uint8_t *pDst, uint32_t l, uint32_t h)
  264. {
  265. pDst[0] = static_cast<uint8_t>(l);
  266. pDst[1] = static_cast<uint8_t>(h);
  267. pDst[2] = static_cast<uint8_t>((l * 6 + h) / 7);
  268. pDst[3] = static_cast<uint8_t>((l * 5 + h * 2) / 7);
  269. pDst[4] = static_cast<uint8_t>((l * 4 + h * 3) / 7);
  270. pDst[5] = static_cast<uint8_t>((l * 3 + h * 4) / 7);
  271. pDst[6] = static_cast<uint8_t>((l * 2 + h * 5) / 7);
  272. pDst[7] = static_cast<uint8_t>((l + h * 6) / 7);
  273. return 8;
  274. }
  275. static inline uint32_t get_block_values(uint8_t *pDst, uint32_t l, uint32_t h)
  276. {
  277. if (l > h)
  278. return get_block_values8(pDst, l, h);
  279. else
  280. return get_block_values6(pDst, l, h);
  281. }
  282. };
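// BC4 palette selection mirrors BC1's two modes: if the first endpoint is greater than the second, all 8
// palette entries are interpolated (get_block_values8); otherwise 6 entries are interpolated and the last
// two are forced to 0 and 255 (get_block_values6).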
  283. void unpack_bc4(const void *pBlock_bits, uint8_t *pPixels, uint32_t stride)
  284. {
  285. static_assert(sizeof(bc4_block) == 8, "sizeof(bc4_block) == 8");
  286. const bc4_block *pBlock = static_cast<const bc4_block *>(pBlock_bits);
  287. uint8_t sel_values[8];
  288. bc4_block::get_block_values(sel_values, pBlock->get_low_alpha(), pBlock->get_high_alpha());
  289. const uint64_t selector_bits = pBlock->get_selector_bits();
  290. for (uint32_t y = 0; y < 4; y++, pPixels += (stride * 4U))
  291. {
  292. pPixels[0] = sel_values[pBlock->get_selector(0, y, selector_bits)];
  293. pPixels[stride * 1] = sel_values[pBlock->get_selector(1, y, selector_bits)];
  294. pPixels[stride * 2] = sel_values[pBlock->get_selector(2, y, selector_bits)];
  295. pPixels[stride * 3] = sel_values[pBlock->get_selector(3, y, selector_bits)];
  296. }
  297. }
  298. // Returns false if the block uses 3-color punchthrough alpha mode, which isn't supported on some GPUs for BC3.
  299. bool unpack_bc3(const void *pBlock_bits, color_rgba *pPixels)
  300. {
  301. bool success = true;
  302. if (unpack_bc1((const uint8_t *)pBlock_bits + sizeof(bc4_block), pPixels, true))
  303. success = false;
  304. unpack_bc4(pBlock_bits, &pPixels[0].a, sizeof(color_rgba));
  305. return success;
  306. }
  307. // Unpacks BC5: writes only the R and G components.
  308. void unpack_bc5(const void *pBlock_bits, color_rgba *pPixels)
  309. {
  310. unpack_bc4(pBlock_bits, &pPixels[0].r, sizeof(color_rgba));
  311. unpack_bc4((const uint8_t *)pBlock_bits + sizeof(bc4_block), &pPixels[0].g, sizeof(color_rgba));
  312. }
  313. //------------------------------------------------------------------------------------------------
  314. // ATC isn't officially documented, so I'm relying on these references:
  315. // http://www.guildsoftware.com/papers/2012.Converting.DXTC.to.ATC.pdf
  316. // https://github.com/Triang3l/S3TConv/blob/master/s3tconv_atitc.c
  317. // The paper incorrectly says the ATC lerp factors are 1/3 and 2/3, but they are actually 3/8 and 5/8.
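// In the usual (non-punchthrough) mode the palette is: c0, (5*c0 + 3*c3)/8, (3*c0 + 5*c3)/8, c3,
// with color0 stored as 555 (its top bit selects the mode) and color1 stored as 565.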
  318. void unpack_atc(const void* pBlock_bits, color_rgba* pPixels)
  319. {
  320. const uint8_t* pBytes = static_cast<const uint8_t*>(pBlock_bits);
  321. const uint16_t color0 = pBytes[0] | (pBytes[1] << 8U);
  322. const uint16_t color1 = pBytes[2] | (pBytes[3] << 8U);
  323. uint32_t sels = pBytes[4] | (pBytes[5] << 8U) | (pBytes[6] << 16U) | (pBytes[7] << 24U);
  324. const bool mode = (color0 & 0x8000) != 0;
  325. color_rgba c[4];
  326. c[0].set((color0 >> 10) & 31, (color0 >> 5) & 31, color0 & 31, 255);
  327. c[0].r = (c[0].r << 3) | (c[0].r >> 2);
  328. c[0].g = (c[0].g << 3) | (c[0].g >> 2);
  329. c[0].b = (c[0].b << 3) | (c[0].b >> 2);
  330. c[3].set((color1 >> 11) & 31, (color1 >> 5) & 63, color1 & 31, 255);
  331. c[3].r = (c[3].r << 3) | (c[3].r >> 2);
  332. c[3].g = (c[3].g << 2) | (c[3].g >> 4);
  333. c[3].b = (c[3].b << 3) | (c[3].b >> 2);
  334. if (mode)
  335. {
  336. c[1].set(basisu::maximum(0, c[0].r - (c[3].r >> 2)), basisu::maximum(0, c[0].g - (c[3].g >> 2)), basisu::maximum(0, c[0].b - (c[3].b >> 2)), 255);
  337. c[2] = c[0];
  338. c[0].set(0, 0, 0, 255);
  339. }
  340. else
  341. {
  342. c[1].r = (c[0].r * 5 + c[3].r * 3) >> 3;
  343. c[1].g = (c[0].g * 5 + c[3].g * 3) >> 3;
  344. c[1].b = (c[0].b * 5 + c[3].b * 3) >> 3;
  345. c[2].r = (c[0].r * 3 + c[3].r * 5) >> 3;
  346. c[2].g = (c[0].g * 3 + c[3].g * 5) >> 3;
  347. c[2].b = (c[0].b * 3 + c[3].b * 5) >> 3;
  348. }
  349. for (uint32_t i = 0; i < 16; i++)
  350. {
  351. const uint32_t s = sels & 3;
  352. pPixels[i] = c[s];
  353. sels >>= 2;
  354. }
  355. }
  356. //------------------------------------------------------------------------------------------------
  357. // BC7 mode 0-7 decompression.
  358. // Instead of one monster routine to unpack all the BC7 modes, we're lumping the 3 subset, 2 subset, 1 subset, and dual plane modes together into simple shared routines.
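// Summary of the modes handled below (derived from the constants in each routine):
//   Modes 0/2: 3 subsets; 4- or 5-bit endpoints; mode 0 has per-endpoint p-bits; 3- or 2-bit weights.
//   Modes 1/3/7: 2 subsets; 6/7/5-bit endpoints; shared p-bits in mode 1, per-endpoint p-bits in 3 and 7;
//                mode 7 also encodes alpha endpoints.
//   Modes 4/5: 1 subset, dual-plane (separate color and alpha indices) with a component rotation.
//   Mode 6: 1 subset, 7-bit RGBA endpoints plus p-bits and 4-bit weights.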
  359. static inline uint32_t bc7_dequant(uint32_t val, uint32_t pbit, uint32_t val_bits) { assert(val < (1U << val_bits)); assert(pbit < 2); assert(val_bits >= 4 && val_bits <= 8); const uint32_t total_bits = val_bits + 1; val = (val << 1) | pbit; val <<= (8 - total_bits); val |= (val >> total_bits); assert(val <= 255); return val; }
  360. static inline uint32_t bc7_dequant(uint32_t val, uint32_t val_bits) { assert(val < (1U << val_bits)); assert(val_bits >= 4 && val_bits <= 8); val <<= (8 - val_bits); val |= (val >> val_bits); assert(val <= 255); return val; }
  361. static inline uint32_t bc7_interp2(uint32_t l, uint32_t h, uint32_t w) { assert(w < 4); return (l * (64 - basist::g_bc7_weights2[w]) + h * basist::g_bc7_weights2[w] + 32) >> 6; }
  362. static inline uint32_t bc7_interp3(uint32_t l, uint32_t h, uint32_t w) { assert(w < 8); return (l * (64 - basist::g_bc7_weights3[w]) + h * basist::g_bc7_weights3[w] + 32) >> 6; }
  363. static inline uint32_t bc7_interp4(uint32_t l, uint32_t h, uint32_t w) { assert(w < 16); return (l * (64 - basist::g_bc7_weights4[w]) + h * basist::g_bc7_weights4[w] + 32) >> 6; }
  364. static inline uint32_t bc7_interp(uint32_t l, uint32_t h, uint32_t w, uint32_t bits)
  365. {
  366. assert(l <= 255 && h <= 255);
  367. switch (bits)
  368. {
  369. case 2: return bc7_interp2(l, h, w);
  370. case 3: return bc7_interp3(l, h, w);
  371. case 4: return bc7_interp4(l, h, w);
  372. default:
  373. break;
  374. }
  375. return 0;
  376. }
  377. bool unpack_bc7_mode0_2(uint32_t mode, const void* pBlock_bits, color_rgba* pPixels)
  378. {
  379. //const uint32_t SUBSETS = 3;
  380. const uint32_t ENDPOINTS = 6;
  381. const uint32_t COMPS = 3;
  382. const uint32_t WEIGHT_BITS = (mode == 0) ? 3 : 2;
  383. const uint32_t ENDPOINT_BITS = (mode == 0) ? 4 : 5;
  384. const uint32_t PBITS = (mode == 0) ? 6 : 0;
  385. const uint32_t WEIGHT_VALS = 1 << WEIGHT_BITS;
  386. uint32_t bit_offset = 0;
  387. const uint8_t* pBuf = static_cast<const uint8_t*>(pBlock_bits);
  388. if (read_bits32(pBuf, bit_offset, mode + 1) != (1U << mode)) return false;
  389. const uint32_t part = read_bits32(pBuf, bit_offset, (mode == 0) ? 4 : 6);
  390. color_rgba endpoints[ENDPOINTS];
  391. for (uint32_t c = 0; c < COMPS; c++)
  392. for (uint32_t e = 0; e < ENDPOINTS; e++)
  393. endpoints[e][c] = (uint8_t)read_bits32(pBuf, bit_offset, ENDPOINT_BITS);
  394. uint32_t pbits[6];
  395. for (uint32_t p = 0; p < PBITS; p++)
  396. pbits[p] = read_bits32(pBuf, bit_offset, 1);
  397. uint32_t weights[16];
  398. for (uint32_t i = 0; i < 16; i++)
  399. weights[i] = read_bits32(pBuf, bit_offset, ((!i) || (i == basist::g_bc7_table_anchor_index_third_subset_1[part]) || (i == basist::g_bc7_table_anchor_index_third_subset_2[part])) ? (WEIGHT_BITS - 1) : WEIGHT_BITS);
  400. assert(bit_offset == 128);
  401. for (uint32_t e = 0; e < ENDPOINTS; e++)
  402. for (uint32_t c = 0; c < 4; c++)
  403. endpoints[e][c] = (uint8_t)((c == 3) ? 255 : (PBITS ? bc7_dequant(endpoints[e][c], pbits[e], ENDPOINT_BITS) : bc7_dequant(endpoints[e][c], ENDPOINT_BITS)));
  404. color_rgba block_colors[3][8];
  405. for (uint32_t s = 0; s < 3; s++)
  406. for (uint32_t i = 0; i < WEIGHT_VALS; i++)
  407. {
  408. for (uint32_t c = 0; c < 3; c++)
  409. block_colors[s][i][c] = (uint8_t)bc7_interp(endpoints[s * 2 + 0][c], endpoints[s * 2 + 1][c], i, WEIGHT_BITS);
  410. block_colors[s][i][3] = 255;
  411. }
  412. for (uint32_t i = 0; i < 16; i++)
  413. pPixels[i] = block_colors[basist::g_bc7_partition3[part * 16 + i]][weights[i]];
  414. return true;
  415. }
  416. bool unpack_bc7_mode1_3_7(uint32_t mode, const void* pBlock_bits, color_rgba* pPixels)
  417. {
  418. //const uint32_t SUBSETS = 2;
  419. const uint32_t ENDPOINTS = 4;
  420. const uint32_t COMPS = (mode == 7) ? 4 : 3;
  421. const uint32_t WEIGHT_BITS = (mode == 1) ? 3 : 2;
  422. const uint32_t ENDPOINT_BITS = (mode == 7) ? 5 : ((mode == 1) ? 6 : 7);
  423. const uint32_t PBITS = (mode == 1) ? 2 : 4;
  424. const bool SHARED_PBITS = (mode == 1);
  425. const uint32_t WEIGHT_VALS = 1 << WEIGHT_BITS;
  426. uint32_t bit_offset = 0;
  427. const uint8_t* pBuf = static_cast<const uint8_t*>(pBlock_bits);
  428. if (read_bits32(pBuf, bit_offset, mode + 1) != (1U << mode)) return false;
  429. const uint32_t part = read_bits32(pBuf, bit_offset, 6);
  430. color_rgba endpoints[ENDPOINTS];
  431. for (uint32_t c = 0; c < COMPS; c++)
  432. for (uint32_t e = 0; e < ENDPOINTS; e++)
  433. endpoints[e][c] = (uint8_t)read_bits32(pBuf, bit_offset, ENDPOINT_BITS);
  434. uint32_t pbits[4];
  435. for (uint32_t p = 0; p < PBITS; p++)
  436. pbits[p] = read_bits32(pBuf, bit_offset, 1);
  437. uint32_t weights[16];
  438. for (uint32_t i = 0; i < 16; i++)
  439. weights[i] = read_bits32(pBuf, bit_offset, ((!i) || (i == basist::g_bc7_table_anchor_index_second_subset[part])) ? (WEIGHT_BITS - 1) : WEIGHT_BITS);
  440. assert(bit_offset == 128);
  441. for (uint32_t e = 0; e < ENDPOINTS; e++)
  442. for (uint32_t c = 0; c < 4; c++)
  443. endpoints[e][c] = (uint8_t)((c == ((mode == 7U) ? 4U : 3U)) ? 255 : bc7_dequant(endpoints[e][c], pbits[SHARED_PBITS ? (e >> 1) : e], ENDPOINT_BITS));
  444. color_rgba block_colors[2][8];
  445. for (uint32_t s = 0; s < 2; s++)
  446. for (uint32_t i = 0; i < WEIGHT_VALS; i++)
  447. {
  448. for (uint32_t c = 0; c < COMPS; c++)
  449. block_colors[s][i][c] = (uint8_t)bc7_interp(endpoints[s * 2 + 0][c], endpoints[s * 2 + 1][c], i, WEIGHT_BITS);
  450. block_colors[s][i][3] = (COMPS == 3) ? 255 : block_colors[s][i][3];
  451. }
  452. for (uint32_t i = 0; i < 16; i++)
  453. pPixels[i] = block_colors[basist::g_bc7_partition2[part * 16 + i]][weights[i]];
  454. return true;
  455. }
  456. bool unpack_bc7_mode4_5(uint32_t mode, const void* pBlock_bits, color_rgba* pPixels)
  457. {
  458. const uint32_t ENDPOINTS = 2;
  459. const uint32_t COMPS = 4;
  460. const uint32_t WEIGHT_BITS = 2;
  461. const uint32_t A_WEIGHT_BITS = (mode == 4) ? 3 : 2;
  462. const uint32_t ENDPOINT_BITS = (mode == 4) ? 5 : 7;
  463. const uint32_t A_ENDPOINT_BITS = (mode == 4) ? 6 : 8;
  464. //const uint32_t WEIGHT_VALS = 1 << WEIGHT_BITS;
  465. //const uint32_t A_WEIGHT_VALS = 1 << A_WEIGHT_BITS;
  466. uint32_t bit_offset = 0;
  467. const uint8_t* pBuf = static_cast<const uint8_t*>(pBlock_bits);
  468. if (read_bits32(pBuf, bit_offset, mode + 1) != (1U << mode)) return false;
  469. const uint32_t comp_rot = read_bits32(pBuf, bit_offset, 2);
  470. const uint32_t index_mode = (mode == 4) ? read_bits32(pBuf, bit_offset, 1) : 0;
  471. color_rgba endpoints[ENDPOINTS];
  472. for (uint32_t c = 0; c < COMPS; c++)
  473. for (uint32_t e = 0; e < ENDPOINTS; e++)
  474. endpoints[e][c] = (uint8_t)read_bits32(pBuf, bit_offset, (c == 3) ? A_ENDPOINT_BITS : ENDPOINT_BITS);
  475. const uint32_t weight_bits[2] = { index_mode ? A_WEIGHT_BITS : WEIGHT_BITS, index_mode ? WEIGHT_BITS : A_WEIGHT_BITS };
  476. uint32_t weights[16], a_weights[16];
  477. for (uint32_t i = 0; i < 16; i++)
  478. (index_mode ? a_weights : weights)[i] = read_bits32(pBuf, bit_offset, weight_bits[index_mode] - ((!i) ? 1 : 0));
  479. for (uint32_t i = 0; i < 16; i++)
  480. (index_mode ? weights : a_weights)[i] = read_bits32(pBuf, bit_offset, weight_bits[1 - index_mode] - ((!i) ? 1 : 0));
  481. assert(bit_offset == 128);
  482. for (uint32_t e = 0; e < ENDPOINTS; e++)
  483. for (uint32_t c = 0; c < 4; c++)
  484. endpoints[e][c] = (uint8_t)bc7_dequant(endpoints[e][c], (c == 3) ? A_ENDPOINT_BITS : ENDPOINT_BITS);
  485. color_rgba block_colors[8];
  486. for (uint32_t i = 0; i < (1U << weight_bits[0]); i++)
  487. for (uint32_t c = 0; c < 3; c++)
  488. block_colors[i][c] = (uint8_t)bc7_interp(endpoints[0][c], endpoints[1][c], i, weight_bits[0]);
  489. for (uint32_t i = 0; i < (1U << weight_bits[1]); i++)
  490. block_colors[i][3] = (uint8_t)bc7_interp(endpoints[0][3], endpoints[1][3], i, weight_bits[1]);
  491. for (uint32_t i = 0; i < 16; i++)
  492. {
  493. pPixels[i] = block_colors[weights[i]];
  494. pPixels[i].a = block_colors[a_weights[i]].a;
  495. if (comp_rot >= 1)
  496. std::swap(pPixels[i].a, pPixels[i].m_comps[comp_rot - 1]);
  497. }
  498. return true;
  499. }
  500. struct bc7_mode_6
  501. {
  502. struct
  503. {
  504. uint64_t m_mode : 7;
  505. uint64_t m_r0 : 7;
  506. uint64_t m_r1 : 7;
  507. uint64_t m_g0 : 7;
  508. uint64_t m_g1 : 7;
  509. uint64_t m_b0 : 7;
  510. uint64_t m_b1 : 7;
  511. uint64_t m_a0 : 7;
  512. uint64_t m_a1 : 7;
  513. uint64_t m_p0 : 1;
  514. } m_lo;
  515. union
  516. {
  517. struct
  518. {
  519. uint64_t m_p1 : 1;
  520. uint64_t m_s00 : 3;
  521. uint64_t m_s10 : 4;
  522. uint64_t m_s20 : 4;
  523. uint64_t m_s30 : 4;
  524. uint64_t m_s01 : 4;
  525. uint64_t m_s11 : 4;
  526. uint64_t m_s21 : 4;
  527. uint64_t m_s31 : 4;
  528. uint64_t m_s02 : 4;
  529. uint64_t m_s12 : 4;
  530. uint64_t m_s22 : 4;
  531. uint64_t m_s32 : 4;
  532. uint64_t m_s03 : 4;
  533. uint64_t m_s13 : 4;
  534. uint64_t m_s23 : 4;
  535. uint64_t m_s33 : 4;
  536. } m_hi;
  537. uint64_t m_hi_bits;
  538. };
  539. };
  540. bool unpack_bc7_mode6(const void *pBlock_bits, color_rgba *pPixels)
  541. {
  542. static_assert(sizeof(bc7_mode_6) == 16, "sizeof(bc7_mode_6) == 16");
  543. const bc7_mode_6 &block = *static_cast<const bc7_mode_6 *>(pBlock_bits);
  544. if (block.m_lo.m_mode != (1 << 6))
  545. return false;
  546. const uint32_t r0 = (uint32_t)((block.m_lo.m_r0 << 1) | block.m_lo.m_p0);
  547. const uint32_t g0 = (uint32_t)((block.m_lo.m_g0 << 1) | block.m_lo.m_p0);
  548. const uint32_t b0 = (uint32_t)((block.m_lo.m_b0 << 1) | block.m_lo.m_p0);
  549. const uint32_t a0 = (uint32_t)((block.m_lo.m_a0 << 1) | block.m_lo.m_p0);
  550. const uint32_t r1 = (uint32_t)((block.m_lo.m_r1 << 1) | block.m_hi.m_p1);
  551. const uint32_t g1 = (uint32_t)((block.m_lo.m_g1 << 1) | block.m_hi.m_p1);
  552. const uint32_t b1 = (uint32_t)((block.m_lo.m_b1 << 1) | block.m_hi.m_p1);
  553. const uint32_t a1 = (uint32_t)((block.m_lo.m_a1 << 1) | block.m_hi.m_p1);
  554. color_rgba vals[16];
  555. for (uint32_t i = 0; i < 16; i++)
  556. {
  557. const uint32_t w = basist::g_bc7_weights4[i];
  558. const uint32_t iw = 64 - w;
  559. vals[i].set_noclamp_rgba(
  560. (r0 * iw + r1 * w + 32) >> 6,
  561. (g0 * iw + g1 * w + 32) >> 6,
  562. (b0 * iw + b1 * w + 32) >> 6,
  563. (a0 * iw + a1 * w + 32) >> 6);
  564. }
  565. pPixels[0] = vals[block.m_hi.m_s00];
  566. pPixels[1] = vals[block.m_hi.m_s10];
  567. pPixels[2] = vals[block.m_hi.m_s20];
  568. pPixels[3] = vals[block.m_hi.m_s30];
  569. pPixels[4] = vals[block.m_hi.m_s01];
  570. pPixels[5] = vals[block.m_hi.m_s11];
  571. pPixels[6] = vals[block.m_hi.m_s21];
  572. pPixels[7] = vals[block.m_hi.m_s31];
  573. pPixels[8] = vals[block.m_hi.m_s02];
  574. pPixels[9] = vals[block.m_hi.m_s12];
  575. pPixels[10] = vals[block.m_hi.m_s22];
  576. pPixels[11] = vals[block.m_hi.m_s32];
  577. pPixels[12] = vals[block.m_hi.m_s03];
  578. pPixels[13] = vals[block.m_hi.m_s13];
  579. pPixels[14] = vals[block.m_hi.m_s23];
  580. pPixels[15] = vals[block.m_hi.m_s33];
  581. return true;
  582. }
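// The BC7 mode number is the position of the lowest set bit in the block's first byte; dispatch on it.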
  583. bool unpack_bc7(const void *pBlock, color_rgba *pPixels)
  584. {
  585. const uint32_t first_byte = static_cast<const uint8_t*>(pBlock)[0];
  586. for (uint32_t mode = 0; mode <= 7; mode++)
  587. {
  588. if (first_byte & (1U << mode))
  589. {
  590. switch (mode)
  591. {
  592. case 0:
  593. case 2:
  594. return unpack_bc7_mode0_2(mode, pBlock, pPixels);
  595. case 1:
  596. case 3:
  597. case 7:
  598. return unpack_bc7_mode1_3_7(mode, pBlock, pPixels);
  599. case 4:
  600. case 5:
  601. return unpack_bc7_mode4_5(mode, pBlock, pPixels);
  602. case 6:
  603. return unpack_bc7_mode6(pBlock, pPixels);
  604. default:
  605. break;
  606. }
  607. }
  608. }
  609. return false;
  610. }
  611. static inline int bc6h_sign_extend(int val, int bits)
  612. {
  613. assert((bits >= 1) && (bits < 32));
  614. assert((val >= 0) && (val < (1 << bits)));
  615. return (val << (32 - bits)) >> (32 - bits);
  616. }
  617. static inline int bc6h_apply_delta(int base, int delta, int num_bits, int is_signed)
  618. {
  619. int bitmask = ((1 << num_bits) - 1);
  620. int v = (base + delta) & bitmask;
  621. return is_signed ? bc6h_sign_extend(v, num_bits) : v;
  622. }
  623. static int bc6h_dequantize(int val, int bits, int is_signed)
  624. {
  625. int result;
  626. if (is_signed)
  627. {
  628. if (bits >= 16)
  629. result = val;
  630. else
  631. {
  632. int s_flag = 0;
  633. if (val < 0)
  634. {
  635. s_flag = 1;
  636. val = -val;
  637. }
  638. if (val == 0)
  639. result = 0;
  640. else if (val >= ((1 << (bits - 1)) - 1))
  641. result = 0x7FFF;
  642. else
  643. result = ((val << 15) + 0x4000) >> (bits - 1);
  644. if (s_flag)
  645. result = -result;
  646. }
  647. }
  648. else
  649. {
  650. if (bits >= 15)
  651. result = val;
  652. else if (!val)
  653. result = 0;
  654. else if (val == ((1 << bits) - 1))
  655. result = 0xFFFF;
  656. else
  657. result = ((val << 16) + 0x8000) >> bits;
  658. }
  659. return result;
  660. }
  661. static inline int bc6h_interpolate(int a, int b, const uint8_t* pWeights, int index)
  662. {
  663. return (a * (64 - (int)pWeights[index]) + b * (int)pWeights[index] + 32) >> 6;
  664. }
  665. static inline basist::half_float bc6h_convert_to_half(int val, int is_signed)
  666. {
  667. if (!is_signed)
  668. {
  669. // scale by 31/64
  670. return (basist::half_float)((val * 31) >> 6);
  671. }
  672. // scale by 31/32
  673. val = (val < 0) ? -(((-val) * 31) >> 5) : (val * 31) >> 5;
  674. int s = 0;
  675. if (val < 0)
  676. {
  677. s = 0x8000;
  678. val = -val;
  679. }
  680. return (basist::half_float)(s | val);
  681. }
  682. static inline uint32_t bc6h_get_bits(uint32_t num_bits, uint64_t& l, uint64_t& h, uint32_t& total_bits)
  683. {
  684. assert((num_bits) && (num_bits <= 63));
  685. uint32_t v = (uint32_t)(l & ((1ULL << num_bits) - 1ULL));
  686. l >>= num_bits;
  687. l |= (h << (64U - num_bits));
  688. h >>= num_bits;
  689. total_bits += num_bits;
  690. assert(total_bits <= 128);
  691. return v;
  692. }
  693. static inline uint32_t bc6h_reverse_bits(uint32_t v, uint32_t num_bits)
  694. {
  695. uint32_t res = 0;
  696. for (uint32_t i = 0; i < num_bits; i++)
  697. {
  698. uint32_t bit = (v & (1u << i)) != 0u;
  699. res |= (bit << (num_bits - 1u - i));
  700. }
  701. return res;
  702. }
  703. static inline uint64_t bc6h_read_le_qword(const void* p)
  704. {
  705. const uint8_t* pSrc = static_cast<const uint8_t*>(p);
  706. return ((uint64_t)read_le_dword(pSrc)) | (((uint64_t)read_le_dword(pSrc + sizeof(uint32_t))) << 32U);
  707. }
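// BC6H decode flow: read the 128-bit block as two qwords, look up the mode from the low 5 bits,
// pull each endpoint field out using the per-mode bit layout table, then sign extend, apply deltas,
// and dequantize the endpoints before interpolating with 3- or 4-bit weights and converting to half floats.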
  708. bool unpack_bc6h(const void* pSrc_block, void* pDst_block, bool is_signed, uint32_t dest_pitch_in_halfs)
  709. {
  710. assert(dest_pitch_in_halfs >= 4 * 3);
  711. const uint32_t MAX_SUBSETS = 2, MAX_COMPS = 3;
  712. const uint8_t* pSrc = static_cast<const uint8_t*>(pSrc_block);
  713. basist::half_float* pDst = static_cast<basist::half_float*>(pDst_block);
  714. uint64_t blo = bc6h_read_le_qword(pSrc), bhi = bc6h_read_le_qword(pSrc + sizeof(uint64_t));
  715. // Unpack mode
  716. const int mode = basist::g_bc6h_mode_lookup[blo & 31];
  717. if (mode < 0)
  718. {
  719. for (int y = 0; y < 4; y++)
  720. {
  721. memset(pDst, 0, sizeof(basist::half_float) * 4);
  722. pDst += dest_pitch_in_halfs;
  723. }
  724. return false;
  725. }
  726. // Skip mode bits
  727. uint32_t total_bits_read = 0;
  728. bc6h_get_bits((mode < 2) ? 2 : 5, blo, bhi, total_bits_read);
  729. assert(mode < (int)basist::NUM_BC6H_MODES);
  730. const uint32_t num_subsets = (mode >= 10) ? 1 : 2;
  731. const bool is_mode_9_or_10 = (mode == 9) || (mode == 10);
  732. // Unpack endpoint components
  733. int comps[MAX_SUBSETS][MAX_COMPS][2] = { { { 0 } } }; // [subset][comp][l/h]
  734. int part_index = 0;
  735. uint32_t layout_index = 0;
  736. while (layout_index < basist::MAX_BC6H_LAYOUT_INDEX)
  737. {
  738. const basist::bc6h_bit_layout& layout = basist::g_bc6h_bit_layouts[mode][layout_index];
  739. if (layout.m_comp < 0)
  740. break;
  741. const int subset = layout.m_index >> 1, lh_index = layout.m_index & 1;
  742. assert((layout.m_comp == 3) || ((subset >= 0) && (subset < (int)MAX_SUBSETS)));
  743. const int last_bit = layout.m_last_bit, first_bit = layout.m_first_bit;
  744. assert(last_bit >= 0);
  745. int& res = (layout.m_comp == 3) ? part_index : comps[subset][layout.m_comp][lh_index];
  746. if (first_bit < 0)
  747. {
  748. res |= (bc6h_get_bits(1, blo, bhi, total_bits_read) << last_bit);
  749. }
  750. else
  751. {
  752. const int total_bits = iabs(last_bit - first_bit) + 1;
  753. const int bit_shift = basisu::minimum(first_bit, last_bit);
  754. int b = bc6h_get_bits(total_bits, blo, bhi, total_bits_read);
  755. if (last_bit < first_bit)
  756. b = bc6h_reverse_bits(b, total_bits);
  757. res |= (b << bit_shift);
  758. }
  759. layout_index++;
  760. }
  761. assert(layout_index != basist::MAX_BC6H_LAYOUT_INDEX);
  762. // Sign extend/dequantize endpoints
  763. const int num_sig_bits = basist::g_bc6h_mode_sig_bits[mode][0];
  764. if (is_signed)
  765. {
  766. for (uint32_t comp = 0; comp < 3; comp++)
  767. comps[0][comp][0] = bc6h_sign_extend(comps[0][comp][0], num_sig_bits);
  768. }
  769. if (is_signed || !is_mode_9_or_10)
  770. {
  771. for (uint32_t subset = 0; subset < num_subsets; subset++)
  772. for (uint32_t comp = 0; comp < 3; comp++)
  773. for (uint32_t lh = (subset ? 0 : 1); lh < 2; lh++)
  774. comps[subset][comp][lh] = bc6h_sign_extend(comps[subset][comp][lh], basist::g_bc6h_mode_sig_bits[mode][1 + comp]);
  775. }
  776. if (!is_mode_9_or_10)
  777. {
  778. for (uint32_t subset = 0; subset < num_subsets; subset++)
  779. for (uint32_t comp = 0; comp < 3; comp++)
  780. for (uint32_t lh = (subset ? 0 : 1); lh < 2; lh++)
  781. comps[subset][comp][lh] = bc6h_apply_delta(comps[0][comp][0], comps[subset][comp][lh], num_sig_bits, is_signed);
  782. }
  783. for (uint32_t subset = 0; subset < num_subsets; subset++)
  784. for (uint32_t comp = 0; comp < 3; comp++)
  785. for (uint32_t lh = 0; lh < 2; lh++)
  786. comps[subset][comp][lh] = bc6h_dequantize(comps[subset][comp][lh], num_sig_bits, is_signed);
  787. // Now unpack weights and output texels
  788. const int weight_bits = (mode >= 10) ? 4 : 3;
  789. const uint8_t* pWeights = (mode >= 10) ? basist::g_bc6h_weight4 : basist::g_bc6h_weight3;
  790. dest_pitch_in_halfs -= 4 * 3;
  791. for (uint32_t y = 0; y < 4; y++)
  792. {
  793. for (uint32_t x = 0; x < 4; x++)
  794. {
  795. int subset = (num_subsets == 1) ? ((x | y) ? 0 : 0x80) : basist::g_bc6h_2subset_patterns[part_index][y][x];
  796. const int num_bits = weight_bits + ((subset & 0x80) ? -1 : 0);
  797. subset &= 1;
  798. const int weight_index = bc6h_get_bits(num_bits, blo, bhi, total_bits_read);
  799. pDst[0] = bc6h_convert_to_half(bc6h_interpolate(comps[subset][0][0], comps[subset][0][1], pWeights, weight_index), is_signed);
  800. pDst[1] = bc6h_convert_to_half(bc6h_interpolate(comps[subset][1][0], comps[subset][1][1], pWeights, weight_index), is_signed);
  801. pDst[2] = bc6h_convert_to_half(bc6h_interpolate(comps[subset][2][0], comps[subset][2][1], pWeights, weight_index), is_signed);
  802. pDst += 3;
  803. }
  804. pDst += dest_pitch_in_halfs;
  805. }
  806. assert(total_bits_read == 128);
  807. return true;
  808. }
  809. //------------------------------------------------------------------------------------------------
  810. // FXT1 (for fun, and because some modern Intel parts support it, and because a subset is like BC1)
  811. struct fxt1_block
  812. {
  813. union
  814. {
  815. struct
  816. {
  817. uint64_t m_t00 : 2;
  818. uint64_t m_t01 : 2;
  819. uint64_t m_t02 : 2;
  820. uint64_t m_t03 : 2;
  821. uint64_t m_t04 : 2;
  822. uint64_t m_t05 : 2;
  823. uint64_t m_t06 : 2;
  824. uint64_t m_t07 : 2;
  825. uint64_t m_t08 : 2;
  826. uint64_t m_t09 : 2;
  827. uint64_t m_t10 : 2;
  828. uint64_t m_t11 : 2;
  829. uint64_t m_t12 : 2;
  830. uint64_t m_t13 : 2;
  831. uint64_t m_t14 : 2;
  832. uint64_t m_t15 : 2;
  833. uint64_t m_t16 : 2;
  834. uint64_t m_t17 : 2;
  835. uint64_t m_t18 : 2;
  836. uint64_t m_t19 : 2;
  837. uint64_t m_t20 : 2;
  838. uint64_t m_t21 : 2;
  839. uint64_t m_t22 : 2;
  840. uint64_t m_t23 : 2;
  841. uint64_t m_t24 : 2;
  842. uint64_t m_t25 : 2;
  843. uint64_t m_t26 : 2;
  844. uint64_t m_t27 : 2;
  845. uint64_t m_t28 : 2;
  846. uint64_t m_t29 : 2;
  847. uint64_t m_t30 : 2;
  848. uint64_t m_t31 : 2;
  849. } m_lo;
  850. uint64_t m_lo_bits;
  851. uint8_t m_sels[8];
  852. };
  853. union
  854. {
  855. struct
  856. {
  857. #ifdef BASISU_USE_ORIGINAL_3DFX_FXT1_ENCODING
  858. // This is the format that 3DFX's DECOMP.EXE tool expects, which I'm assuming is what the actual 3DFX hardware wanted.
  859. // Unfortunately, color0/color1 and color2/color3 are flipped relative to the official OpenGL extension and Intel's documentation!
  860. uint64_t m_b1 : 5;
  861. uint64_t m_g1 : 5;
  862. uint64_t m_r1 : 5;
  863. uint64_t m_b0 : 5;
  864. uint64_t m_g0 : 5;
  865. uint64_t m_r0 : 5;
  866. uint64_t m_b3 : 5;
  867. uint64_t m_g3 : 5;
  868. uint64_t m_r3 : 5;
  869. uint64_t m_b2 : 5;
  870. uint64_t m_g2 : 5;
  871. uint64_t m_r2 : 5;
  872. #else
  873. // Intel's encoding, and the encoding in the OpenGL FXT1 spec.
  874. uint64_t m_b0 : 5;
  875. uint64_t m_g0 : 5;
  876. uint64_t m_r0 : 5;
  877. uint64_t m_b1 : 5;
  878. uint64_t m_g1 : 5;
  879. uint64_t m_r1 : 5;
  880. uint64_t m_b2 : 5;
  881. uint64_t m_g2 : 5;
  882. uint64_t m_r2 : 5;
  883. uint64_t m_b3 : 5;
  884. uint64_t m_g3 : 5;
  885. uint64_t m_r3 : 5;
  886. #endif
  887. uint64_t m_alpha : 1;
  888. uint64_t m_glsb : 2;
  889. uint64_t m_mode : 1;
  890. } m_hi;
  891. uint64_t m_hi_bits;
  892. };
  893. };
  894. static color_rgba expand_565(const color_rgba& c)
  895. {
  896. return color_rgba((c.r << 3) | (c.r >> 2), (c.g << 2) | (c.g >> 4), (c.b << 3) | (c.b >> 2), 255);
  897. }
  898. // We only support CC_MIXED non-alpha blocks here because that's the only mode the transcoder uses at the moment.
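// A CC_MIXED block covers 8x4 texels, split into two 4x4 halves. Each half gets its own pair of 565
// endpoints; the green LSBs come from m_glsb, and for colors 0 and 2 that LSB is XORed with the MSB of
// the half's first 2-bit selector.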
  899. bool unpack_fxt1(const void *p, color_rgba *pPixels)
  900. {
  901. const fxt1_block* pBlock = static_cast<const fxt1_block*>(p);
  902. if (pBlock->m_hi.m_mode == 0)
  903. return false;
  904. if (pBlock->m_hi.m_alpha == 1)
  905. return false;
  906. color_rgba colors[4];
  907. colors[0].r = pBlock->m_hi.m_r0;
  908. colors[0].g = (uint8_t)((pBlock->m_hi.m_g0 << 1) | ((pBlock->m_lo.m_t00 >> 1) ^ (pBlock->m_hi.m_glsb & 1)));
  909. colors[0].b = pBlock->m_hi.m_b0;
  910. colors[0].a = 255;
  911. colors[1].r = pBlock->m_hi.m_r1;
  912. colors[1].g = (uint8_t)((pBlock->m_hi.m_g1 << 1) | (pBlock->m_hi.m_glsb & 1));
  913. colors[1].b = pBlock->m_hi.m_b1;
  914. colors[1].a = 255;
  915. colors[2].r = pBlock->m_hi.m_r2;
  916. colors[2].g = (uint8_t)((pBlock->m_hi.m_g2 << 1) | ((pBlock->m_lo.m_t16 >> 1) ^ (pBlock->m_hi.m_glsb >> 1)));
  917. colors[2].b = pBlock->m_hi.m_b2;
  918. colors[2].a = 255;
  919. colors[3].r = pBlock->m_hi.m_r3;
  920. colors[3].g = (uint8_t)((pBlock->m_hi.m_g3 << 1) | (pBlock->m_hi.m_glsb >> 1));
  921. colors[3].b = pBlock->m_hi.m_b3;
  922. colors[3].a = 255;
  923. for (uint32_t i = 0; i < 4; i++)
  924. colors[i] = expand_565(colors[i]);
  925. color_rgba block0_colors[4];
  926. block0_colors[0] = colors[0];
  927. block0_colors[1] = color_rgba((colors[0].r * 2 + colors[1].r + 1) / 3, (colors[0].g * 2 + colors[1].g + 1) / 3, (colors[0].b * 2 + colors[1].b + 1) / 3, 255);
  928. block0_colors[2] = color_rgba((colors[1].r * 2 + colors[0].r + 1) / 3, (colors[1].g * 2 + colors[0].g + 1) / 3, (colors[1].b * 2 + colors[0].b + 1) / 3, 255);
  929. block0_colors[3] = colors[1];
  930. for (uint32_t i = 0; i < 16; i++)
  931. {
  932. const uint32_t sel = (pBlock->m_sels[i >> 2] >> ((i & 3) * 2)) & 3;
  933. const uint32_t x = i & 3;
  934. const uint32_t y = i >> 2;
  935. pPixels[x + y * 8] = block0_colors[sel];
  936. }
  937. color_rgba block1_colors[4];
  938. block1_colors[0] = colors[2];
  939. block1_colors[1] = color_rgba((colors[2].r * 2 + colors[3].r + 1) / 3, (colors[2].g * 2 + colors[3].g + 1) / 3, (colors[2].b * 2 + colors[3].b + 1) / 3, 255);
  940. block1_colors[2] = color_rgba((colors[3].r * 2 + colors[2].r + 1) / 3, (colors[3].g * 2 + colors[2].g + 1) / 3, (colors[3].b * 2 + colors[2].b + 1) / 3, 255);
  941. block1_colors[3] = colors[3];
  942. for (uint32_t i = 0; i < 16; i++)
  943. {
  944. const uint32_t sel = (pBlock->m_sels[4 + (i >> 2)] >> ((i & 3) * 2)) & 3;
  945. const uint32_t x = i & 3;
  946. const uint32_t y = i >> 2;
  947. pPixels[4 + x + y * 8] = block1_colors[sel];
  948. }
  949. return true;
  950. }
  951. //------------------------------------------------------------------------------------------------
  952. // PVRTC2 (non-interpolated, hard_flag=1 modulation=0 subset only!)
  953. struct pvrtc2_block
  954. {
  955. uint8_t m_modulation[4];
  956. union
  957. {
  958. union
  959. {
  960. // Opaque mode: RGB colora=554 and colorb=555
  961. struct
  962. {
  963. uint32_t m_mod_flag : 1;
  964. uint32_t m_blue_a : 4;
  965. uint32_t m_green_a : 5;
  966. uint32_t m_red_a : 5;
  967. uint32_t m_hard_flag : 1;
  968. uint32_t m_blue_b : 5;
  969. uint32_t m_green_b : 5;
  970. uint32_t m_red_b : 5;
  971. uint32_t m_opaque_flag : 1;
  972. } m_opaque_color_data;
  973. // Transparent mode: RGBA colora=4433 and colorb=4443
  974. struct
  975. {
  976. uint32_t m_mod_flag : 1;
  977. uint32_t m_blue_a : 3;
  978. uint32_t m_green_a : 4;
  979. uint32_t m_red_a : 4;
  980. uint32_t m_alpha_a : 3;
  981. uint32_t m_hard_flag : 1;
  982. uint32_t m_blue_b : 4;
  983. uint32_t m_green_b : 4;
  984. uint32_t m_red_b : 4;
  985. uint32_t m_alpha_b : 3;
  986. uint32_t m_opaque_flag : 1;
  987. } m_trans_color_data;
  988. };
  989. uint32_t m_color_data_bits;
  990. };
  991. };
  992. static color_rgba convert_rgb_555_to_888(const color_rgba& col)
  993. {
  994. return color_rgba((col[0] << 3) | (col[0] >> 2), (col[1] << 3) | (col[1] >> 2), (col[2] << 3) | (col[2] >> 2), 255);
  995. }
  996. static color_rgba convert_rgba_5554_to_8888(const color_rgba& col)
  997. {
  998. return color_rgba((col[0] << 3) | (col[0] >> 2), (col[1] << 3) | (col[1] >> 2), (col[2] << 3) | (col[2] >> 2), (col[3] << 4) | col[3]);
  999. }
  1000. // PVRTC2 is currently limited to only what our transcoder outputs (non-interpolated, hard_flag=1 modulation=0). In this mode, PVRTC2 looks much like BC1/ATC.
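// In this restricted mode the block decodes like BC1/ATC: two endpoint colors A and B, with two
// intermediate colors at 5/8:3/8 and 3/8:5/8 blends, selected per texel by 2-bit modulation values.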
  1001. bool unpack_pvrtc2(const void *p, color_rgba *pPixels)
  1002. {
  1003. const pvrtc2_block* pBlock = static_cast<const pvrtc2_block*>(p);
  1004. if ((!pBlock->m_opaque_color_data.m_hard_flag) || (pBlock->m_opaque_color_data.m_mod_flag))
  1005. {
  1006. // This mode isn't supported by the transcoder, so we aren't bothering with it here.
  1007. return false;
  1008. }
  1009. color_rgba colors[4];
  1010. if (pBlock->m_opaque_color_data.m_opaque_flag)
  1011. {
  1012. // colora=554
  1013. color_rgba color_a(pBlock->m_opaque_color_data.m_red_a, pBlock->m_opaque_color_data.m_green_a, (pBlock->m_opaque_color_data.m_blue_a << 1) | (pBlock->m_opaque_color_data.m_blue_a >> 3), 255);
  1014. // colorb=555
  1015. color_rgba color_b(pBlock->m_opaque_color_data.m_red_b, pBlock->m_opaque_color_data.m_green_b, pBlock->m_opaque_color_data.m_blue_b, 255);
  1016. colors[0] = convert_rgb_555_to_888(color_a);
  1017. colors[3] = convert_rgb_555_to_888(color_b);
  1018. colors[1].set((colors[0].r * 5 + colors[3].r * 3) / 8, (colors[0].g * 5 + colors[3].g * 3) / 8, (colors[0].b * 5 + colors[3].b * 3) / 8, 255);
  1019. colors[2].set((colors[0].r * 3 + colors[3].r * 5) / 8, (colors[0].g * 3 + colors[3].g * 5) / 8, (colors[0].b * 3 + colors[3].b * 5) / 8, 255);
  1020. }
  1021. else
  1022. {
  1023. // colora=4433
  1024. color_rgba color_a(
  1025. (pBlock->m_trans_color_data.m_red_a << 1) | (pBlock->m_trans_color_data.m_red_a >> 3),
  1026. (pBlock->m_trans_color_data.m_green_a << 1) | (pBlock->m_trans_color_data.m_green_a >> 3),
  1027. (pBlock->m_trans_color_data.m_blue_a << 2) | (pBlock->m_trans_color_data.m_blue_a >> 1),
  1028. pBlock->m_trans_color_data.m_alpha_a << 1);
  1029. //colorb=4443
  1030. color_rgba color_b(
  1031. (pBlock->m_trans_color_data.m_red_b << 1) | (pBlock->m_trans_color_data.m_red_b >> 3),
  1032. (pBlock->m_trans_color_data.m_green_b << 1) | (pBlock->m_trans_color_data.m_green_b >> 3),
  1033. (pBlock->m_trans_color_data.m_blue_b << 1) | (pBlock->m_trans_color_data.m_blue_b >> 3),
  1034. (pBlock->m_trans_color_data.m_alpha_b << 1) | 1);
  1035. colors[0] = convert_rgba_5554_to_8888(color_a);
  1036. colors[3] = convert_rgba_5554_to_8888(color_b);
  1037. }
  1038. colors[1].set((colors[0].r * 5 + colors[3].r * 3) / 8, (colors[0].g * 5 + colors[3].g * 3) / 8, (colors[0].b * 5 + colors[3].b * 3) / 8, (colors[0].a * 5 + colors[3].a * 3) / 8);
  1039. colors[2].set((colors[0].r * 3 + colors[3].r * 5) / 8, (colors[0].g * 3 + colors[3].g * 5) / 8, (colors[0].b * 3 + colors[3].b * 5) / 8, (colors[0].a * 3 + colors[3].a * 5) / 8);
  1040. for (uint32_t i = 0; i < 16; i++)
  1041. {
  1042. const uint32_t sel = (pBlock->m_modulation[i >> 2] >> ((i & 3) * 2)) & 3;
  1043. pPixels[i] = colors[sel];
  1044. }
  1045. return true;
  1046. }
  1047. //------------------------------------------------------------------------------------------------
  1048. // ETC2 EAC R11 or RG11
  1049. struct etc2_eac_r11
  1050. {
  1051. uint64_t m_base : 8;
  1052. uint64_t m_table : 4;
  1053. uint64_t m_mul : 4;
  1054. uint64_t m_sels_0 : 8;
  1055. uint64_t m_sels_1 : 8;
  1056. uint64_t m_sels_2 : 8;
  1057. uint64_t m_sels_3 : 8;
  1058. uint64_t m_sels_4 : 8;
  1059. uint64_t m_sels_5 : 8;
  1060. uint64_t get_sels() const
  1061. {
  1062. return ((uint64_t)m_sels_0 << 40U) | ((uint64_t)m_sels_1 << 32U) | ((uint64_t)m_sels_2 << 24U) | ((uint64_t)m_sels_3 << 16U) | ((uint64_t)m_sels_4 << 8U) | m_sels_5;
  1063. }
  1064. void set_sels(uint64_t v)
  1065. {
  1066. m_sels_0 = (v >> 40U) & 0xFF;
  1067. m_sels_1 = (v >> 32U) & 0xFF;
  1068. m_sels_2 = (v >> 24U) & 0xFF;
  1069. m_sels_3 = (v >> 16U) & 0xFF;
  1070. m_sels_4 = (v >> 8U) & 0xFF;
  1071. m_sels_5 = v & 0xFF;
  1072. }
  1073. };
  1074. struct etc2_eac_rg11
  1075. {
  1076. etc2_eac_r11 m_c[2];
  1077. };
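// EAC R11 reconstruction (see unpack_etc2_eac_r below): an 11-bit value is built as base*8+4 plus
// table[selector] times multiplier*8 (a multiplier of 0 acts as 1), clamped to [0, 2047], then
// scaled down to 8 bits with rounding.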
  1078. void unpack_etc2_eac_r(const void *p, color_rgba* pPixels, uint32_t c)
  1079. {
  1080. const etc2_eac_r11* pBlock = static_cast<const etc2_eac_r11*>(p);
  1081. const uint64_t sels = pBlock->get_sels();
  1082. const int base = (int)pBlock->m_base * 8 + 4;
  1083. const int mul = pBlock->m_mul ? ((int)pBlock->m_mul * 8) : 1;
  1084. const int table = (int)pBlock->m_table;
  1085. for (uint32_t y = 0; y < 4; y++)
  1086. {
  1087. for (uint32_t x = 0; x < 4; x++)
  1088. {
  1089. const uint32_t shift = 45 - ((y + x * 4) * 3);
  1090. const uint32_t sel = (uint32_t)((sels >> shift) & 7);
  1091. int val = base + g_etc2_eac_tables[table][sel] * mul;
  1092. val = clamp<int>(val, 0, 2047);
  1093. // Convert to 8-bits with rounding
  1094. //pPixels[x + y * 4].m_comps[c] = static_cast<uint8_t>((val * 255 + 1024) / 2047);
  1095. pPixels[x + y * 4].m_comps[c] = static_cast<uint8_t>((val * 255 + 1023) / 2047);
  1096. } // x
  1097. } // y
  1098. }
  1099. void unpack_etc2_eac_rg(const void* p, color_rgba* pPixels)
  1100. {
  1101. for (uint32_t c = 0; c < 2; c++)
  1102. {
  1103. const etc2_eac_r11* pBlock = &static_cast<const etc2_eac_rg11*>(p)->m_c[c];
  1104. unpack_etc2_eac_r(pBlock, pPixels, c);
  1105. }
  1106. }
  1107. //------------------------------------------------------------------------------------------------
  1108. // UASTC
  1109. void unpack_uastc(const void* p, color_rgba* pPixels)
  1110. {
  1111. basist::unpack_uastc(*static_cast<const basist::uastc_block*>(p), (basist::color32 *)pPixels, false);
  1112. }
  1113. // Unpacks to RGBA, R, RG, or A. LDR GPU texture formats only.
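// Example (illustrative; pBlock_data here stands for the caller's pointer to one 16-byte block):
//   color_rgba pixels[16];
//   bool ok = unpack_block(texture_format::cBC7, pBlock_data, pixels);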
  1114. bool unpack_block(texture_format fmt, const void* pBlock, color_rgba* pPixels)
  1115. {
  1116. switch (fmt)
  1117. {
  1118. case texture_format::cBC1:
  1119. {
  1120. unpack_bc1(pBlock, pPixels, true);
  1121. break;
  1122. }
  1123. case texture_format::cBC1_NV:
  1124. {
  1125. unpack_bc1_nv(pBlock, pPixels, true);
  1126. break;
  1127. }
  1128. case texture_format::cBC1_AMD:
  1129. {
  1130. unpack_bc1_amd(pBlock, pPixels, true);
  1131. break;
  1132. }
  1133. case texture_format::cBC3:
  1134. {
  1135. return unpack_bc3(pBlock, pPixels);
  1136. }
  1137. case texture_format::cBC4:
  1138. {
  1139. // Unpack to R
  1140. unpack_bc4(pBlock, &pPixels[0].r, sizeof(color_rgba));
  1141. break;
  1142. }
  1143. case texture_format::cBC5:
  1144. {
  1145. unpack_bc5(pBlock, pPixels);
  1146. break;
  1147. }
  1148. case texture_format::cBC7:
  1149. {
  1150. return unpack_bc7(pBlock, pPixels);
  1151. }
  1152. // Full ETC2 color blocks (planar/T/H modes) is currently unsupported in basisu, but we do support ETC2 with alpha (using ETC1 for color)
  1153. case texture_format::cETC2_RGB:
  1154. case texture_format::cETC1:
  1155. case texture_format::cETC1S:
  1156. {
  1157. return unpack_etc1(*static_cast<const etc_block*>(pBlock), pPixels);
  1158. }
  1159. case texture_format::cETC2_RGBA:
  1160. {
  1161. if (!unpack_etc1(static_cast<const etc_block*>(pBlock)[1], pPixels))
  1162. return false;
  1163. unpack_etc2_eac(pBlock, pPixels);
  1164. break;
  1165. }
  1166. case texture_format::cETC2_ALPHA:
  1167. {
  1168. // Unpack to A
  1169. unpack_etc2_eac(pBlock, pPixels);
  1170. break;
  1171. }
  1172. case texture_format::cBC6HSigned:
  1173. case texture_format::cBC6HUnsigned:
  1174. case texture_format::cASTC_HDR_4x4:
  1175. case texture_format::cUASTC_HDR_4x4:
  1176. {
  1177. // Can't unpack HDR blocks in unpack_block() because it returns 32bpp pixel data.
  1178. assert(0);
  1179. return false;
  1180. }
  1181. case texture_format::cASTC_LDR_4x4:
  1182. {
  1183. const bool astc_srgb = false;
  1184. bool status = basisu_astc::astc::decompress_ldr(reinterpret_cast<uint8_t*>(pPixels), static_cast<const uint8_t*>(pBlock), astc_srgb, 4, 4);
  1185. assert(status);
  1186. if (!status)
  1187. return false;
  1188. break;
  1189. }
  1190. case texture_format::cATC_RGB:
  1191. {
  1192. unpack_atc(pBlock, pPixels);
  1193. break;
  1194. }
  1195. case texture_format::cATC_RGBA_INTERPOLATED_ALPHA:
  1196. {
  1197. unpack_atc(static_cast<const uint8_t*>(pBlock) + 8, pPixels);
  1198. unpack_bc4(pBlock, &pPixels[0].a, sizeof(color_rgba));
  1199. break;
  1200. }
  1201. case texture_format::cFXT1_RGB:
  1202. {
  1203. unpack_fxt1(pBlock, pPixels);
  1204. break;
  1205. }
  1206. case texture_format::cPVRTC2_4_RGBA:
  1207. {
  1208. unpack_pvrtc2(pBlock, pPixels);
  1209. break;
  1210. }
  1211. case texture_format::cETC2_R11_EAC:
  1212. {
  1213. unpack_etc2_eac_r(static_cast<const etc2_eac_r11 *>(pBlock), pPixels, 0);
  1214. break;
  1215. }
  1216. case texture_format::cETC2_RG11_EAC:
  1217. {
  1218. unpack_etc2_eac_rg(pBlock, pPixels);
  1219. break;
  1220. }
  1221. case texture_format::cUASTC4x4:
  1222. {
  1223. unpack_uastc(pBlock, pPixels);
  1224. break;
  1225. }
  1226. default:
  1227. {
  1228. assert(0);
  1229. // TODO
  1230. return false;
  1231. }
  1232. }
  1233. return true;
  1234. }
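// Usage sketch (illustrative only, not part of the library): decoding a single 16-byte BC7 block
// with unpack_block(). "pSomeBC7Block" is a hypothetical pointer supplied by the caller.
#if 0
static void example_unpack_one_bc7_block(const void* pSomeBC7Block)
{
	color_rgba pixels[16]; // 4x4 output pixels, in raster order
	if (!unpack_block(texture_format::cBC7, pSomeBC7Block, pixels))
	{
		// Unpacking failed (malformed block or unsupported format).
	}
}
#endif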
bool unpack_block_hdr(texture_format fmt, const void* pBlock, vec4F* pPixels)
{
	switch (fmt)
	{
	case texture_format::cASTC_HDR_4x4:
	case texture_format::cUASTC_HDR_4x4:
	{
#if 1
		bool status = basisu_astc::astc::decompress_hdr(&pPixels[0][0], (uint8_t*)pBlock, 4, 4);
		assert(status);
		if (!status)
			return false;
#else
		basist::half_float half_block[16][4];

		astc_helpers::log_astc_block log_blk;
		if (!astc_helpers::unpack_block(pBlock, log_blk, 4, 4))
			return false;
		if (!astc_helpers::decode_block(log_blk, half_block, 4, 4, astc_helpers::cDecodeModeHDR16))
			return false;

		for (uint32_t p = 0; p < 16; p++)
		{
			pPixels[p][0] = basist::half_to_float(half_block[p][0]);
			pPixels[p][1] = basist::half_to_float(half_block[p][1]);
			pPixels[p][2] = basist::half_to_float(half_block[p][2]);
			pPixels[p][3] = basist::half_to_float(half_block[p][3]);
		}
		//memset(pPixels, 0, sizeof(vec4F) * 16);
#endif

		return true;
	}
	case texture_format::cBC6HSigned:
	case texture_format::cBC6HUnsigned:
	{
		basist::half_float half_block[16][3];

		unpack_bc6h(pBlock, half_block, fmt == texture_format::cBC6HSigned);

		for (uint32_t p = 0; p < 16; p++)
		{
			pPixels[p][0] = basist::half_to_float(half_block[p][0]);
			pPixels[p][1] = basist::half_to_float(half_block[p][1]);
			pPixels[p][2] = basist::half_to_float(half_block[p][2]);
			pPixels[p][3] = 1.0f;
		}

		return true;
	}
	default:
	{
		break;
	}
	}

	assert(0);
	return false;
}
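// Usage sketch (illustrative only): decoding a single BC6H unsigned block to float pixels with
// unpack_block_hdr(). "pSomeBC6HBlock" is a hypothetical pointer to one 16-byte BC6H block.
#if 0
static void example_unpack_one_bc6h_block(const void* pSomeBC6HBlock)
{
	vec4F pixels[16]; // 4x4 output pixels; RGB is linear float, alpha is forced to 1.0f
	if (unpack_block_hdr(texture_format::cBC6HUnsigned, pSomeBC6HBlock, pixels))
	{
		// pixels[] is now valid.
	}
}
#endif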
bool gpu_image::unpack(image& img) const
{
	img.resize(get_pixel_width(), get_pixel_height());
	img.set_all(g_black_color);

	if (!img.get_width() || !img.get_height())
		return true;

	if ((m_fmt == texture_format::cPVRTC1_4_RGB) || (m_fmt == texture_format::cPVRTC1_4_RGBA))
	{
		pvrtc4_image pi(m_width, m_height);

		if (get_total_blocks() != pi.get_total_blocks())
			return false;

		memcpy(&pi.get_blocks()[0], get_ptr(), get_size_in_bytes());

		pi.deswizzle();
		pi.unpack_all_pixels(img);

		return true;
	}

	assert((m_block_width <= cMaxBlockSize) && (m_block_height <= cMaxBlockSize));
	color_rgba pixels[cMaxBlockSize * cMaxBlockSize];
	for (uint32_t i = 0; i < cMaxBlockSize * cMaxBlockSize; i++)
		pixels[i] = g_black_color;

	bool success = true;

	for (uint32_t by = 0; by < m_blocks_y; by++)
	{
		for (uint32_t bx = 0; bx < m_blocks_x; bx++)
		{
			const void* pBlock = get_block_ptr(bx, by);

			if (!unpack_block(m_fmt, pBlock, pixels))
				success = false;

			img.set_block_clipped(pixels, bx * m_block_width, by * m_block_height, m_block_width, m_block_height);
		} // bx
	} // by

	return success;
}
bool gpu_image::unpack_hdr(imagef& img) const
{
	if ((m_fmt != texture_format::cASTC_HDR_4x4) &&
		(m_fmt != texture_format::cUASTC_HDR_4x4) &&
		(m_fmt != texture_format::cBC6HUnsigned) &&
		(m_fmt != texture_format::cBC6HSigned))
	{
		// Can't call this on LDR images, at least currently. (We could unpack the LDR data and convert it to float.)
		assert(0);
		return false;
	}

	img.resize(get_pixel_width(), get_pixel_height());
	img.set_all(vec4F(0.0f));

	if (!img.get_width() || !img.get_height())
		return true;

	assert((m_block_width <= cMaxBlockSize) && (m_block_height <= cMaxBlockSize));
	vec4F pixels[cMaxBlockSize * cMaxBlockSize];
	clear_obj(pixels);

	bool success = true;

	for (uint32_t by = 0; by < m_blocks_y; by++)
	{
		for (uint32_t bx = 0; bx < m_blocks_x; bx++)
		{
			const void* pBlock = get_block_ptr(bx, by);

			if (!unpack_block_hdr(m_fmt, pBlock, pixels))
				success = false;

			img.set_block_clipped(pixels, bx * m_block_width, by * m_block_height, m_block_width, m_block_height);
		} // bx
	} // by

	return success;
}
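// Usage sketch (illustrative only): choosing between unpack() and unpack_hdr() based on the
// gpu_image's format. "gi" is any valid gpu_image supplied by the caller.
#if 0
static bool example_unpack_gpu_image(const gpu_image& gi, image& ldr_img, imagef& hdr_img)
{
	switch (gi.get_format())
	{
	case texture_format::cBC6HSigned:
	case texture_format::cBC6HUnsigned:
	case texture_format::cASTC_HDR_4x4:
	case texture_format::cUASTC_HDR_4x4:
		return gi.unpack_hdr(hdr_img);
	default:
		return gi.unpack(ldr_img);
	}
}
#endif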
//------------------------------------------------------------------------------------------------
// KTX1 texture file writing

static const uint8_t g_ktx_file_id[12] = { 0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A };

// KTX/GL enums
enum
{
	KTX_ENDIAN = 0x04030201,
	KTX_OPPOSITE_ENDIAN = 0x01020304,
	KTX_ETC1_RGB8_OES = 0x8D64,
	KTX_RED = 0x1903,
	KTX_RG = 0x8227,
	KTX_RGB = 0x1907,
	KTX_RGBA = 0x1908,
	KTX_COMPRESSED_RGB_S3TC_DXT1_EXT = 0x83F0,
	KTX_COMPRESSED_RGBA_S3TC_DXT5_EXT = 0x83F3,
	KTX_COMPRESSED_RED_RGTC1_EXT = 0x8DBB,
	KTX_COMPRESSED_RED_GREEN_RGTC2_EXT = 0x8DBD,
	KTX_COMPRESSED_RGB8_ETC2 = 0x9274,
	KTX_COMPRESSED_RGBA8_ETC2_EAC = 0x9278,
	KTX_COMPRESSED_RGBA_BPTC_UNORM = 0x8E8C,
	KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM = 0x8E8D,
	KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT = 0x8E8E,
	KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT = 0x8E8F,
	KTX_COMPRESSED_RGB_PVRTC_4BPPV1_IMG = 0x8C00,
	KTX_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG = 0x8C02,
	KTX_COMPRESSED_RGBA_ASTC_4x4_KHR = 0x93B0,
	KTX_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR = 0x93D0,
	KTX_COMPRESSED_RGBA_UASTC_4x4_KHR = 0x94CC, // TODO - Use proper value!
	KTX_ATC_RGB_AMD = 0x8C92,
	KTX_ATC_RGBA_INTERPOLATED_ALPHA_AMD = 0x87EE,
	KTX_COMPRESSED_RGB_FXT1_3DFX = 0x86B0,
	KTX_COMPRESSED_RGBA_FXT1_3DFX = 0x86B1,
	KTX_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG = 0x9138,
	KTX_COMPRESSED_R11_EAC = 0x9270,
	KTX_COMPRESSED_RG11_EAC = 0x9272
};

struct ktx_header
{
	uint8_t m_identifier[12];
	packed_uint<4> m_endianness;
	packed_uint<4> m_glType;
	packed_uint<4> m_glTypeSize;
	packed_uint<4> m_glFormat;
	packed_uint<4> m_glInternalFormat;
	packed_uint<4> m_glBaseInternalFormat;
	packed_uint<4> m_pixelWidth;
	packed_uint<4> m_pixelHeight;
	packed_uint<4> m_pixelDepth;
	packed_uint<4> m_numberOfArrayElements;
	packed_uint<4> m_numberOfFaces;
	packed_uint<4> m_numberOfMipmapLevels;
	packed_uint<4> m_bytesOfKeyValueData;

	void clear() { clear_obj(*this); }
};
// Input is a texture array of mipmapped gpu_image's: gpu_images[array_index][level_index]
bool create_ktx_texture_file(uint8_vec &ktx_data, const basisu::vector<gpu_image_vec>& gpu_images, bool cubemap_flag)
{
	if (!gpu_images.size())
	{
		assert(0);
		return false;
	}

	uint32_t width = 0, height = 0, total_levels = 0;
	basisu::texture_format fmt = texture_format::cInvalidTextureFormat;

	// Sanity check the input
	if (cubemap_flag)
	{
		if ((gpu_images.size() % 6) != 0)
		{
			assert(0);
			return false;
		}
	}

	for (uint32_t array_index = 0; array_index < gpu_images.size(); array_index++)
	{
		const gpu_image_vec &levels = gpu_images[array_index];

		if (!levels.size())
		{
			// Empty mip chain
			assert(0);
			return false;
		}

		if (!array_index)
		{
			width = levels[0].get_pixel_width();
			height = levels[0].get_pixel_height();
			total_levels = (uint32_t)levels.size();
			fmt = levels[0].get_format();
		}
		else
		{
			if ((width != levels[0].get_pixel_width()) ||
				(height != levels[0].get_pixel_height()) ||
				(total_levels != levels.size()))
			{
				// All cubemap/texture array faces must be the same dimension
				assert(0);
				return false;
			}
		}

		for (uint32_t level_index = 0; level_index < levels.size(); level_index++)
		{
			if (level_index)
			{
				if ( (levels[level_index].get_pixel_width() != maximum<uint32_t>(1, levels[0].get_pixel_width() >> level_index)) ||
					(levels[level_index].get_pixel_height() != maximum<uint32_t>(1, levels[0].get_pixel_height() >> level_index)) )
				{
					// Malformed mipmap chain
					assert(0);
					return false;
				}
			}

			if (fmt != levels[level_index].get_format())
			{
				// All input textures must use the same GPU format
				assert(0);
				return false;
			}
		}
	}

	uint32_t internal_fmt = KTX_ETC1_RGB8_OES, base_internal_fmt = KTX_RGB;

	switch (fmt)
	{
	case texture_format::cBC1:
	case texture_format::cBC1_NV:
	case texture_format::cBC1_AMD:
	{
		internal_fmt = KTX_COMPRESSED_RGB_S3TC_DXT1_EXT;
		break;
	}
	case texture_format::cBC3:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_S3TC_DXT5_EXT;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cBC4:
	{
		internal_fmt = KTX_COMPRESSED_RED_RGTC1_EXT;// KTX_COMPRESSED_LUMINANCE_LATC1_EXT;
		base_internal_fmt = KTX_RED;
		break;
	}
	case texture_format::cBC5:
	{
		internal_fmt = KTX_COMPRESSED_RED_GREEN_RGTC2_EXT;
		base_internal_fmt = KTX_RG;
		break;
	}
	case texture_format::cETC1:
	case texture_format::cETC1S:
	{
		internal_fmt = KTX_ETC1_RGB8_OES;
		break;
	}
	case texture_format::cETC2_RGB:
	{
		internal_fmt = KTX_COMPRESSED_RGB8_ETC2;
		break;
	}
	case texture_format::cETC2_RGBA:
	{
		internal_fmt = KTX_COMPRESSED_RGBA8_ETC2_EAC;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cBC6HSigned:
	{
		internal_fmt = KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cBC6HUnsigned:
	{
		internal_fmt = KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cBC7:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_BPTC_UNORM;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cPVRTC1_4_RGB:
	{
		internal_fmt = KTX_COMPRESSED_RGB_PVRTC_4BPPV1_IMG;
		break;
	}
	case texture_format::cPVRTC1_4_RGBA:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	// We use different enums for HDR vs. LDR ASTC, but internally they are both just ASTC.
	case texture_format::cASTC_LDR_4x4:
	case texture_format::cASTC_HDR_4x4:
	case texture_format::cUASTC_HDR_4x4: // UASTC_HDR is just HDR-only ASTC
	{
		internal_fmt = KTX_COMPRESSED_RGBA_ASTC_4x4_KHR;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cATC_RGB:
	{
		internal_fmt = KTX_ATC_RGB_AMD;
		break;
	}
	case texture_format::cATC_RGBA_INTERPOLATED_ALPHA:
	{
		internal_fmt = KTX_ATC_RGBA_INTERPOLATED_ALPHA_AMD;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cETC2_R11_EAC:
	{
		internal_fmt = KTX_COMPRESSED_R11_EAC;
		base_internal_fmt = KTX_RED;
		break;
	}
	case texture_format::cETC2_RG11_EAC:
	{
		internal_fmt = KTX_COMPRESSED_RG11_EAC;
		base_internal_fmt = KTX_RG;
		break;
	}
	case texture_format::cUASTC4x4:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_UASTC_4x4_KHR;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	case texture_format::cFXT1_RGB:
	{
		internal_fmt = KTX_COMPRESSED_RGB_FXT1_3DFX;
		break;
	}
	case texture_format::cPVRTC2_4_RGBA:
	{
		internal_fmt = KTX_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG;
		base_internal_fmt = KTX_RGBA;
		break;
	}
	default:
	{
		// TODO
		assert(0);
		return false;
	}
	}

	ktx_header header;
	header.clear();
	memcpy(&header.m_identifier, g_ktx_file_id, sizeof(g_ktx_file_id));
	header.m_endianness = KTX_ENDIAN;

	header.m_pixelWidth = width;
	header.m_pixelHeight = height;

	header.m_glTypeSize = 1;

	header.m_glInternalFormat = internal_fmt;
	header.m_glBaseInternalFormat = base_internal_fmt;

	header.m_numberOfArrayElements = (uint32_t)(cubemap_flag ? (gpu_images.size() / 6) : gpu_images.size());
	if (header.m_numberOfArrayElements == 1)
		header.m_numberOfArrayElements = 0;

	header.m_numberOfMipmapLevels = total_levels;
	header.m_numberOfFaces = cubemap_flag ? 6 : 1;

	append_vector(ktx_data, (uint8_t*)&header, sizeof(header));

	for (uint32_t level_index = 0; level_index < total_levels; level_index++)
	{
		uint32_t img_size = gpu_images[0][level_index].get_size_in_bytes();

		if ((header.m_numberOfFaces == 1) || (header.m_numberOfArrayElements > 1))
		{
			img_size = img_size * header.m_numberOfFaces * maximum<uint32_t>(1, header.m_numberOfArrayElements);
		}

		assert(img_size && ((img_size & 3) == 0));

		packed_uint<4> packed_img_size(img_size);
		append_vector(ktx_data, (uint8_t*)&packed_img_size, sizeof(packed_img_size));

		uint32_t bytes_written = 0;
		(void)bytes_written;

		for (uint32_t array_index = 0; array_index < maximum<uint32_t>(1, header.m_numberOfArrayElements); array_index++)
		{
			for (uint32_t face_index = 0; face_index < header.m_numberOfFaces; face_index++)
			{
				const gpu_image& img = gpu_images[cubemap_flag ? (array_index * 6 + face_index) : array_index][level_index];

				append_vector(ktx_data, (uint8_t*)img.get_ptr(), img.get_size_in_bytes());

				bytes_written += img.get_size_in_bytes();
			}
		} // array_index
	} // level_index

	return true;
}
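// Usage sketch (illustrative only): writing a single non-mipmapped 2D gpu_image "gi" as a KTX1
// blob. The outer vector indexes faces/layers and the inner vector is that face's mip chain.
#if 0
static bool example_create_ktx_blob(const gpu_image& gi, uint8_vec& ktx_blob)
{
	basisu::vector<gpu_image_vec> faces;
	faces.resize(1);
	faces[0].push_back(gi);
	return create_ktx_texture_file(ktx_blob, faces, false);
}
#endif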
bool does_dds_support_format(texture_format fmt)
{
	switch (fmt)
	{
	case texture_format::cBC1_NV:
	case texture_format::cBC1_AMD:
	case texture_format::cBC1:
	case texture_format::cBC3:
	case texture_format::cBC4:
	case texture_format::cBC5:
	case texture_format::cBC6HSigned:
	case texture_format::cBC6HUnsigned:
	case texture_format::cBC7:
		return true;
	default:
		break;
	}

	return false;
}
// Only supports the basic DirectX BC texture formats.
// The gpu_images array is indexed as [face/layer][mipmap level].
// For cubemap arrays, the number of faces/layers must be a multiple of 6.
// Accepts 2D, 2D mipmapped, 2D array, 2D array mipmapped,
// and cubemap, cubemap mipmapped, and cubemap array mipmapped.
bool write_dds_file(uint8_vec &dds_data, const basisu::vector<gpu_image_vec>& gpu_images, bool cubemap_flag, bool use_srgb_format)
{
	return false;
}

bool write_dds_file(const char* pFilename, const basisu::vector<gpu_image_vec>& gpu_images, bool cubemap_flag, bool use_srgb_format)
{
	uint8_vec dds_data;

	if (!write_dds_file(dds_data, gpu_images, cubemap_flag, use_srgb_format))
		return false;

	if (!write_vec_to_file(pFilename, dds_data))
	{
		fprintf(stderr, "write_dds_file: Failed writing DDS file data\n");
		return false;
	}

	return true;
}

bool read_uncompressed_dds_file(const char* pFilename, basisu::vector<image> &ldr_mips, basisu::vector<imagef>& hdr_mips)
{
	return false;
}
bool write_compressed_texture_file(const char* pFilename, const basisu::vector<gpu_image_vec>& g, bool cubemap_flag, bool use_srgb_format)
{
	std::string extension(string_tolower(string_get_extension(pFilename)));

	uint8_vec filedata;
	if (extension == "ktx")
	{
		if (!create_ktx_texture_file(filedata, g, cubemap_flag))
			return false;
	}
	else if (extension == "pvr")
	{
		// TODO
		return false;
	}
	else if (extension == "dds")
	{
		if (!write_dds_file(filedata, g, cubemap_flag, use_srgb_format))
			return false;
	}
	else
	{
		// Unsupported compressed texture file extension
		assert(0);
		return false;
	}

	return basisu::write_vec_to_file(pFilename, filedata);
}

bool write_compressed_texture_file(const char* pFilename, const gpu_image_vec& g, bool use_srgb_format)
{
	basisu::vector<gpu_image_vec> a;
	a.push_back(g);
	return write_compressed_texture_file(pFilename, a, false, use_srgb_format);
}

bool write_compressed_texture_file(const char* pFilename, const gpu_image& g, bool use_srgb_format)
{
	basisu::vector<gpu_image_vec> v;
	enlarge_vector(v, 1)->push_back(g);
	return write_compressed_texture_file(pFilename, v, false, use_srgb_format);
}
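// Usage sketch (illustrative only): the single-image overload above picks the container from the
// filename's extension, so saving to "out.ktx" routes through create_ktx_texture_file().
#if 0
static bool example_save_compressed(const gpu_image& gi)
{
	return write_compressed_texture_file("out.ktx", gi, false);
}
#endif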
//const uint32_t OUT_FILE_MAGIC = 'TEXC';
struct out_file_header
{
	packed_uint<4> m_magic;
	packed_uint<4> m_pad;
	packed_uint<4> m_width;
	packed_uint<4> m_height;
};

// As no modern tool supports FXT1-format .KTX files, we write .OUT files instead, so 3DFX's original tools (shipped in 1999) can decode our encoded output.
bool write_3dfx_out_file(const char* pFilename, const gpu_image& gi)
{
	out_file_header hdr;
	//hdr.m_magic = OUT_FILE_MAGIC;
	hdr.m_magic.m_bytes[0] = 67;
	hdr.m_magic.m_bytes[1] = 88;
	hdr.m_magic.m_bytes[2] = 69;
	hdr.m_magic.m_bytes[3] = 84;
	hdr.m_pad = 0;
	hdr.m_width = gi.get_blocks_x() * 8;
	hdr.m_height = gi.get_blocks_y() * 4;

	FILE* pFile = nullptr;
#ifdef _WIN32
	fopen_s(&pFile, pFilename, "wb");
#else
	pFile = fopen(pFilename, "wb");
#endif
	if (!pFile)
		return false;

	fwrite(&hdr, sizeof(hdr), 1, pFile);
	fwrite(gi.get_ptr(), gi.get_size_in_bytes(), 1, pFile);

	return fclose(pFile) != EOF;
}
// The .astc texture file format is readable by ARM's astcenc, AMD Compressonator, and other engines/tools. Oddly, it doesn't support mipmaps, which limits its usefulness.
// https://github.com/ARM-software/astc-encoder/blob/main/Docs/FileFormat.md
bool write_astc_file(const char* pFilename, const void* pBlocks, uint32_t block_width, uint32_t block_height, uint32_t dim_x, uint32_t dim_y)
{
	assert(pBlocks && (block_width >= 4) && (block_height >= 4) && (dim_x > 0) && (dim_y > 0));

	uint8_vec file_data;

	// Magic value 0x5CA1AB13, little endian
	file_data.push_back(0x13);
	file_data.push_back(0xAB);
	file_data.push_back(0xA1);
	file_data.push_back(0x5C);

	// Block dimensions (X, Y, Z)
	file_data.push_back((uint8_t)block_width);
	file_data.push_back((uint8_t)block_height);
	file_data.push_back(1);

	// Image dimensions X, Y, Z - each 24-bit little endian
	file_data.push_back((uint8_t)dim_x);
	file_data.push_back((uint8_t)(dim_x >> 8));
	file_data.push_back((uint8_t)(dim_x >> 16));

	file_data.push_back((uint8_t)dim_y);
	file_data.push_back((uint8_t)(dim_y >> 8));
	file_data.push_back((uint8_t)(dim_y >> 16));

	file_data.push_back((uint8_t)1);
	file_data.push_back((uint8_t)0);
	file_data.push_back((uint8_t)0);

	const uint32_t num_blocks_x = (dim_x + block_width - 1) / block_width;
	const uint32_t num_blocks_y = (dim_y + block_height - 1) / block_height;
	const uint32_t total_bytes = num_blocks_x * num_blocks_y * 16;

	const size_t cur_size = file_data.size();
	file_data.resize(cur_size + total_bytes);
	memcpy(&file_data[cur_size], pBlocks, total_bytes);

	return write_vec_to_file(pFilename, file_data);
}
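// Usage sketch (illustrative only): dumping a 4x4 ASTC or UASTC HDR gpu_image "gi" to a .astc
// file that astcenc and similar tools can load.
#if 0
static bool example_write_astc(const char* pFilename, const gpu_image& gi)
{
	return write_astc_file(pFilename, gi.get_ptr(), 4, 4, gi.get_pixel_width(), gi.get_pixel_height());
}
#endif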
} // basisu