// kernel.ispc — ISPC texture compression kernels (BC1/BC3/BC7)
/*
Copyright (c) 2013, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Unsigned aliases for ISPC's sized integer types (int8/int32/int64 are
// ISPC built-ins); used throughout the encoders below.
typedef unsigned int8 uint8;
typedef unsigned int32 uint32;
typedef unsigned int64 uint64;
///////////////////////////
// generic helpers
  32. inline void swap_ints(int u[], int v[], uniform int n)
  33. {
  34. for (uniform int i=0; i<n; i++)
  35. {
  36. int t = u[i];
  37. u[i] = v[i];
  38. v[i] = t;
  39. }
  40. }
  41. inline void swap_uints(uint32 u[], uint32 v[], uniform int n)
  42. {
  43. for (uniform int i=0; i<n; i++)
  44. {
  45. uint32 t = u[i];
  46. u[i] = v[i];
  47. v[i] = t;
  48. }
  49. }
  50. inline float sq(float v)
  51. {
  52. return v*v;
  53. }
  54. inline int pow2(int x)
  55. {
  56. return 1<<x;
  57. }
  58. inline float clamp(float v, int a, int b)
  59. {
  60. return clamp(v, (float)a, (float)b);
  61. }
  62. // the following helpers isolate performance warnings
  63. inline unsigned int32 gather_uint(const uniform unsigned int32* const uniform ptr, int idx)
  64. {
  65. return ptr[idx]; // (perf warning expected)
  66. }
  67. inline unsigned int32 gather_uint(const varying unsigned int32* const uniform ptr, int idx)
  68. {
  69. return ptr[idx]; // (perf warning expected)
  70. }
  71. inline int32 gather_int(const uniform int32* const uniform ptr, int idx)
  72. {
  73. return ptr[idx]; // (perf warning expected)
  74. }
  75. inline float gather_float(varying float* uniform ptr, int idx)
  76. {
  77. return ptr[idx]; // (perf warning expected)
  78. }
  79. inline void scatter_uint(uniform unsigned int32* ptr, int idx, uint32 value)
  80. {
  81. ptr[idx] = value; // (perf warning expected)
  82. }
  83. inline void scatter_int(varying int32* uniform ptr, int idx, uint32 value)
  84. {
  85. ptr[idx] = value; // (perf warning expected)
  86. }
  87. inline uint32 shift_right(uint32 v, const uniform int bits)
  88. {
  89. return v>>bits; // (perf warning expected)
  90. }
///////////////////////////////////////////////////////////
// BC1/BC7 shared
// View of an 8-bit-per-channel RGBA image.
struct rgba_surface
{
    uint8* ptr;                 // first byte of the pixel data
    int width, height, stride;  // width/height in pixels; stride in bytes per row
};
  98. inline void load_block_interleaved(float block[48], uniform rgba_surface* uniform src, int xx, uniform int yy)
  99. {
  100. for (uniform int y = 0; y<4; y++)
  101. for (uniform int x = 0; x<4; x++)
  102. {
  103. uniform unsigned int32* uniform src_ptr = (unsigned int32*)&src->ptr[(yy * 4 + y)*src->stride];
  104. unsigned int32 rgba = gather_uint(src_ptr, xx * 4 + x);
  105. block[16 * 0 + y * 4 + x] = (int)((rgba >> 0) & 255);
  106. block[16 * 1 + y * 4 + x] = (int)((rgba >> 8) & 255);
  107. block[16 * 2 + y * 4 + x] = (int)((rgba >> 16) & 255);
  108. }
  109. }
  110. inline void load_block_interleaved_rgba(float block[64], uniform rgba_surface* uniform src, int xx, uniform int yy)
  111. {
  112. for (uniform int y=0; y<4; y++)
  113. for (uniform int x=0; x<4; x++)
  114. {
  115. uniform unsigned int32* uniform src_ptr = (unsigned int32*)&src->ptr[(yy*4+y)*src->stride];
  116. unsigned int32 rgba = gather_uint(src_ptr, xx*4+x);
  117. block[16*0+y*4+x] = (int)((rgba>> 0)&255);
  118. block[16*1+y*4+x] = (int)((rgba>> 8)&255);
  119. block[16*2+y*4+x] = (int)((rgba>>16)&255);
  120. block[16*3+y*4+x] = (int)((rgba>>24)&255);
  121. }
  122. }
  123. inline void load_block_interleaved_16bit(float block[48], uniform rgba_surface* uniform src, int xx, uniform int yy)
  124. {
  125. for (uniform int y = 0; y<4; y++)
  126. for (uniform int x = 0; x<4; x++)
  127. {
  128. uniform unsigned int32* uniform src_ptr_r = (unsigned int32*)&src->ptr[(yy * 4 + y)*src->stride + 0];
  129. uniform unsigned int32* uniform src_ptr_g = (unsigned int32*)&src->ptr[(yy * 4 + y)*src->stride + 2];
  130. uniform unsigned int32* uniform src_ptr_b = (unsigned int32*)&src->ptr[(yy * 4 + y)*src->stride + 4];
  131. unsigned int32 xr = gather_uint(src_ptr_r, (xx * 4 + x) * 2);
  132. unsigned int32 xg = gather_uint(src_ptr_g, (xx * 4 + x) * 2);
  133. unsigned int32 xb = gather_uint(src_ptr_b, (xx * 4 + x) * 2);
  134. block[16 * 0 + y * 4 + x] = (int)(xr & 0xFFFF);
  135. block[16 * 1 + y * 4 + x] = (int)(xg & 0xFFFF);
  136. block[16 * 2 + y * 4 + x] = (int)(xb & 0xFFFF);
  137. block[16 * 3 + y * 4 + x] = 0;
  138. }
  139. }
  140. inline void store_data(uniform uint8 dst[], int width, int xx, uniform int yy, uint32 data[], int data_size)
  141. {
  142. for (uniform int k=0; k<data_size; k++)
  143. {
  144. uniform uint32* dst_ptr = (uint32*)&dst[(yy)*width*data_size];
  145. scatter_uint(dst_ptr, xx*data_size+k, data[k]);
  146. }
  147. }
  148. inline void ssymv(float a[3], float covar[6], float b[3])
  149. {
  150. a[0] = covar[0]*b[0]+covar[1]*b[1]+covar[2]*b[2];
  151. a[1] = covar[1]*b[0]+covar[3]*b[1]+covar[4]*b[2];
  152. a[2] = covar[2]*b[0]+covar[4]*b[1]+covar[5]*b[2];
  153. }
  154. inline void ssymv3(float a[4], float covar[10], float b[4])
  155. {
  156. a[0] = covar[0]*b[0]+covar[1]*b[1]+covar[2]*b[2];
  157. a[1] = covar[1]*b[0]+covar[4]*b[1]+covar[5]*b[2];
  158. a[2] = covar[2]*b[0]+covar[5]*b[1]+covar[7]*b[2];
  159. }
  160. inline void ssymv4(float a[4], float covar[10], float b[4])
  161. {
  162. a[0] = covar[0]*b[0]+covar[1]*b[1]+covar[2]*b[2]+covar[3]*b[3];
  163. a[1] = covar[1]*b[0]+covar[4]*b[1]+covar[5]*b[2]+covar[6]*b[3];
  164. a[2] = covar[2]*b[0]+covar[5]*b[1]+covar[7]*b[2]+covar[8]*b[3];
  165. a[3] = covar[3]*b[0]+covar[6]*b[1]+covar[8]*b[2]+covar[9]*b[3];
  166. }
// Approximate the dominant eigenvector of the symmetric 3x3 matrix covar
// (principal axis of the color cloud) by power iteration starting from
// (1,1,1).  The result in axis is NOT normalized; callers divide by
// |axis|^2 where needed.
inline void compute_axis3(float axis[3], float covar[6], uniform const int powerIterations)
{
    float vec[3] = {1,1,1};
    for (uniform int i=0; i<powerIterations; i++)
    {
        ssymv(axis, covar, vec); // axis = covar * vec
        for (uniform int p=0; p<3; p++) vec[p] = axis[p];
        if (i%2==1) // renormalize every other iteration
        {
            // keeps magnitudes bounded without paying rsqrt every pass
            float norm_sq = 0;
            for (uniform int p=0; p<3; p++)
                norm_sq += axis[p]*axis[p];
            float rnorm = rsqrt(norm_sq);
            for (uniform int p=0; p<3; p++) vec[p] *= rnorm;
        }
    }
    for (uniform int p=0; p<3; p++) axis[p] = vec[p];
}
// 3/4-channel variant of compute_axis3: power iteration on the symmetric
// channels x channels matrix stored as the 10-entry upper triangle of a
// 4x4.  channels selects the ssymv3 or ssymv4 multiply; result in axis
// is unnormalized.
inline void compute_axis(float axis[4], float covar[10], uniform const int powerIterations, uniform int channels)
{
    float vec[4] = {1,1,1,1};
    for (uniform int i=0; i<powerIterations; i++)
    {
        if (channels == 3) ssymv3(axis, covar, vec);
        if (channels == 4) ssymv4(axis, covar, vec);
        for (uniform int p=0; p<channels; p++) vec[p] = axis[p];
        if (i%2==1) // renormalize every other iteration
        {
            float norm_sq = 0;
            for (uniform int p=0; p<channels; p++)
                norm_sq += axis[p]*axis[p];
            float rnorm = rsqrt(norm_sq);
            for (uniform int p=0; p<channels; p++) vec[p] *= rnorm;
        }
    }
    for (uniform int p=0; p<channels; p++) axis[p] = vec[p];
}
  204. ///////////////////////////////////////////////////////////
  205. // BC1/BC3 encoding
  206. inline int stb__Mul8Bit(int a, int b)
  207. {
  208. int t = a*b + 128;
  209. return (t + (t >> 8)) >> 8;
  210. }
  211. inline unsigned int16 stb__As16Bit(int r, int g, int b)
  212. {
  213. return (stb__Mul8Bit(r,31) << 11) + (stb__Mul8Bit(g,63) << 5) + stb__Mul8Bit(b,31);
  214. }
  215. inline unsigned int16 enc_rgb565(float c[3])
  216. {
  217. return stb__As16Bit((int)c[0], (int)c[1], (int)c[2]);
  218. }
  219. inline void dec_rgb565(float c[3], int p)
  220. {
  221. int c2 = (p>>0)&31;
  222. int c1 = (p>>5)&63;
  223. int c0 = (p>>11)&31;
  224. c[0] = (c0<<3)+(c0>>2);
  225. c[1] = (c1<<2)+(c1>>4);
  226. c[2] = (c2<<3)+(c2>>2);
  227. }
  228. inline void pick_endpoints_dc(int c0[3], int c1[3], int block[48], int iaxis[3])
  229. {
  230. for (uniform int p=0; p<3; p++)
  231. for (uniform int y=0; y<4; y++)
  232. for (uniform int x=0; x<4; x++)
  233. {
  234. c0[p] += block[p*16+y*4+x];
  235. }
  236. for (uniform int p=0; p<3; p++)
  237. c0[p] >>= 4;
  238. }
// Project every pixel onto `axis` (relative to the block mean dc), take
// the extreme projections, and map them back into RGB space as the two
// endpoints c0 (min side) and c1 (max side), clamped to [0,255].
inline void pick_endpoints(float c0[3], float c1[3], float block[48], float axis[3], float dc[3])
{
    float min_dot = 256*256;
    float max_dot = 0;
    for (uniform int y=0; y<4; y++)
    for (uniform int x=0; x<4; x++)
    {
        float dot = 0;
        for (uniform int p=0; p<3; p++)
            dot += (block[p*16+y*4+x]-dc[p])*axis[p];
        min_dot = min(min_dot, dot);
        max_dot = max(max_dot, dot);
    }
    // near-constant block: force a minimal spread so the endpoints differ
    if (max_dot-min_dot < 1f)
    {
        min_dot -= 0.5f;
        max_dot += 0.5f;
    }
    // divide by |axis|^2 so the result is independent of the axis'
    // (unnormalized) length
    float norm_sq = 0;
    for (uniform int p=0; p<3; p++)
        norm_sq += axis[p]*axis[p];
    float rnorm_sq = rcp(norm_sq);
    for (uniform int p=0; p<3; p++)
    {
        c0[p] = clamp(dc[p]+min_dot*rnorm_sq*axis[p], 0, 255);
        c1[p] = clamp(dc[p]+max_dot*rnorm_sq*axis[p], 0, 255);
    }
}
// Quantize all 16 pixels of the block to 2-bit indices along the axis
// between the packed-565 endpoints p0 and p1; returns the 32 index bits
// with pixel k's index at bit position 2k (index 0 nearest c0, 3 nearest
// c1 — see fix_qbits for the final BC1 index order).
inline uint32 fast_quant(float block[48], int p0, int p1)
{
    float c0[3];
    float c1[3];
    dec_rgb565(c0, p0);
    dec_rgb565(c1, p1);
    float dir[3];
    for (uniform int p=0; p<3; p++) dir[p] = c1[p]-c0[p];
    float sq_norm = 0;
    for (uniform int p=0; p<3; p++) sq_norm += sq(dir[p]);
    float rsq_norm = rcp(sq_norm);
    // scale dir so a full c0->c1 step maps to quantization level 3
    for (uniform int p=0; p<3; p++) dir[p] *= rsq_norm*3;
    // bias folds in both the -c0 offset and the +0.5 rounding
    float bias = 0.5;
    for (uniform int p=0; p<3; p++) bias -= c0[p]*dir[p];
    uint32 bits = 0;
    uint32 scaler = 1;
    for (uniform int k=0; k<16; k++)
    {
        float dot = 0;
        for (uniform int p=0; p<3; p++)
            dot += block[k+p*16]*dir[p];
        int q = clamp((int)(dot+bias), 0, 3);
        //bits += q<<(k*2);
        bits += q*scaler; // multiply instead of variable shift (cheaper in ISPC)
        scaler *= 4;
    }
    return bits;
}
// Mean (dc) and symmetric covariance (6-entry upper triangle) of the
// block's 16 RGB values.  Straightforward form; see
// compute_covar_dc_ugly below for the faster scalarized variant.
inline void compute_covar_dc(float covar[6], float dc[3], float block[48])
{
    for (uniform int i=0; i<6; i++) covar[i] = 0;
    for (uniform int p=0; p<3; p++) dc[p] = 0;
    for (uniform int k=0; k<16; k++)
    {
        for (uniform int p=0; p<3; p++)
            dc[p] += block[k+p*16];
    }
    for (uniform int p=0; p<3; p++) dc[p] /= 16;
    for (uniform int k=0; k<16; k++)
    {
        float rgb[3];
        for (uniform int p=0; p<3; p++)
            rgb[p] = block[k+p*16]-dc[p];
        covar[0] += rgb[0]*rgb[0];
        covar[1] += rgb[0]*rgb[1];
        covar[2] += rgb[0]*rgb[2];
        covar[3] += rgb[1]*rgb[1];
        covar[4] += rgb[1]*rgb[2];
        covar[5] += rgb[2]*rgb[2];
    }
}
// ugly, but makes BC1 compression 20% faster overall
// Same result as compute_covar_dc, but accumulates into scalar locals
// (covar0..covar5) instead of an indexed array so the compiler keeps
// everything in registers.
inline void compute_covar_dc_ugly(float covar[6], float dc[3], float block[48])
{
    // per-channel mean
    for (uniform int p=0; p<3; p++)
    {
        float acc = 0;
        for (uniform int k=0; k<16; k++)
            acc += block[k+p*16];
        dc[p] = acc/16;
    }
    float covar0 = 0f;
    float covar1 = 0f;
    float covar2 = 0f;
    float covar3 = 0f;
    float covar4 = 0f;
    float covar5 = 0f;
    for (uniform int k=0; k<16; k++)
    {
        float rgb0, rgb1, rgb2;
        rgb0 = block[k+0*16]-dc[0];
        rgb1 = block[k+1*16]-dc[1];
        rgb2 = block[k+2*16]-dc[2];
        covar0 += rgb0*rgb0;
        covar1 += rgb0*rgb1;
        covar2 += rgb0*rgb2;
        covar3 += rgb1*rgb1;
        covar4 += rgb1*rgb2;
        covar5 += rgb2*rgb2;
    }
    covar[0] = covar0;
    covar[1] = covar1;
    covar[2] = covar2;
    covar[3] = covar3;
    covar[4] = covar4;
    covar[5] = covar5;
}
  354. inline void bc1_refine(int pe[2], float block[48], unsigned int32 bits, float dc[3])
  355. {
  356. float c0[3];
  357. float c1[3];
  358. if ((bits ^ (bits*4)) < 4)
  359. {
  360. // single color
  361. for (uniform int p=0; p<3; p++)
  362. {
  363. c0[p] = dc[p];
  364. c1[p] = dc[p];
  365. }
  366. }
  367. else
  368. {
  369. float Atb1[3] = {0,0,0};
  370. float sum_q = 0;
  371. float sum_qq = 0;
  372. unsigned int32 shifted_bits = bits;
  373. for (uniform int k=0; k<16; k++)
  374. {
  375. float q = (int)(shifted_bits&3);
  376. shifted_bits >>= 2;
  377. float x = 3-q;
  378. float y = q;
  379. sum_q += q;
  380. sum_qq += q*q;
  381. for (uniform int p=0; p<3; p++) Atb1[p] += x*block[k+p*16];
  382. }
  383. float sum[3];
  384. float Atb2[3];
  385. for (uniform int p=0; p<3; p++)
  386. {
  387. sum[p] = dc[p]*16;
  388. Atb2[p] = 3*sum[p]-Atb1[p];
  389. }
  390. float Cxx = 16*sq(3)-2*3*sum_q+sum_qq;
  391. float Cyy = sum_qq;
  392. float Cxy = 3*sum_q-sum_qq;
  393. float scale = 3f * rcp(Cxx*Cyy - Cxy*Cxy);
  394. for (uniform int p=0; p<3; p++)
  395. {
  396. c0[p] = (Atb1[p]*Cyy - Atb2[p]*Cxy)*scale;
  397. c1[p] = (Atb2[p]*Cxx - Atb1[p]*Cxy)*scale;
  398. c0[p] = clamp(c0[p], 0, 255);
  399. c1[p] = clamp(c1[p], 0, 255);
  400. }
  401. }
  402. pe[0] = enc_rgb565(c0);
  403. pe[1] = enc_rgb565(c1);
  404. }
// Remap all sixteen 2-bit indices at once from the linear gradient order
// produced by fast_quant (0..3 = c0..c1) to BC1's index order
// (0 = color0, 1 = color1, 2/3 = the interpolated colors): per 2-bit
// pair the transform maps 0->0, 1->2, 2->3, 3->1.
inline uint32 fix_qbits(uint32 qbits)
{
    uniform const uint32 mask_01b = 0x55555555; // low bit of every pair
    uniform const uint32 mask_10b = 0xAAAAAAAA; // high bit of every pair
    uint32 qbits0 = qbits&mask_01b;
    uint32 qbits1 = qbits&mask_10b;
    qbits = (qbits1>>1) + (qbits1 ^ (qbits0<<1));
    return qbits;
}
// Core BC1 encode of one planar-RGB block (48 floats) into two 32-bit
// words: data[0] = packed 565 endpoints (larger endpoint in the low 16
// bits), data[1] = 16 x 2-bit color indices.
inline void CompressBlockBC1_core(float block[48], uint32 data[2])
{
    uniform const int powerIterations = 4;
    uniform const int refineIterations = 1;
    float covar[6];
    float dc[3];
    compute_covar_dc_ugly(covar, dc, block);
    // small diagonal regularization keeps the power iteration stable on
    // degenerate (flat) blocks
    float eps = 0.001;
    covar[0] += eps;
    covar[3] += eps;
    covar[5] += eps;
    float axis[3];
    compute_axis3(axis, covar, powerIterations);
    float c0[3];
    float c1[3];
    pick_endpoints(c0, c1, block, axis, dc);
    int p[2];
    p[0] = enc_rgb565(c0);
    p[1] = enc_rgb565(c1);
    // BC1 four-color mode requires endpoint0 > endpoint1
    if (p[0]<p[1]) swap_ints(&p[0], &p[1], 1);
    data[0] = (1<<16)*p[1]+p[0];
    data[1] = fast_quant(block, p[0], p[1]);
    // refine
    for (uniform int i=0; i<refineIterations; i++)
    {
        bc1_refine(p, block, data[1], dc);
        if (p[0]<p[1]) swap_ints(&p[0], &p[1], 1);
        data[0] = (1<<16)*p[1]+p[0];
        data[1] = fast_quant(block, p[0], p[1]);
    }
    // convert linear gradient indices to BC1 index order
    data[1] = fix_qbits(data[1]);
}
// Encode 16 alpha values as the BC3 alpha half-block (8 bytes):
// data[0] low 16 bits = the two endpoints (max endpoint in the low byte,
// min in the next), followed by 48 bits of 16 x 3-bit indices split
// across data[0] (upper 16 bits) and data[1].
inline void CompressBlockBC3_alpha(float block[16], uint32 data[2])
{
    // ep[0] = block min, ep[1] = block max
    float ep[2] = { 255, 0 };
    for (uniform int k=0; k<16; k++)
    {
        ep[0] = min(ep[0], block[k]);
        ep[1] = max(ep[1], block[k]);
    }
    if (ep[0] == ep[1]) ep[1] = ep[0]+0.1f; // flat alpha: avoid divide by zero
    uint32 qblock[2] = { 0, 0 };
    float scale = 7f/(ep[1]-ep[0]);
    for (uniform int k=0; k<16; k++)
    {
        float v = block[k];
        float proj = (v-ep[0])*scale+0.5f;
        int q = clamp((int)proj, 0, 7);
        // remap linear 0..7 (min..max) to BC3's 8-value index order:
        // 0 -> alpha0 (max), 1 -> alpha1 (min), 2..7 -> interpolated
        q = 7-q;
        if (q > 0) q++;
        if (q==8) q = 1;
        qblock[k/8] |= q << ((k%8)*3);
    }
    // (could be improved by refinement)
    data[0] = clamp((int)ep[0], 0, 255)*256+clamp((int)ep[1], 0, 255);
    data[0] |= qblock[0]<<16;
    data[1] = qblock[0]>>16;
    data[1] |= qblock[1]<<8;
}
  473. inline void CompressBlockBC1(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[])
  474. {
  475. float block[48];
  476. uint32 data[2];
  477. load_block_interleaved(block, src, xx, yy);
  478. CompressBlockBC1_core(block, data);
  479. store_data(dst, src->width, xx, yy, data, 2);
  480. }
  481. inline void CompressBlockBC3(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[])
  482. {
  483. float block[64];
  484. uint32 data[4];
  485. load_block_interleaved_rgba(block, src, xx, yy);
  486. CompressBlockBC3_alpha(&block[48], &data[0]);
  487. CompressBlockBC1_core(block, &data[2]);
  488. store_data(dst, src->width, xx, yy, data, 4);
  489. }
// Entry point: compress the whole surface to BC1.  Rows of 4x4 blocks
// are iterated uniformly; block columns are spread across program
// instances via foreach.  width/height are treated as multiples of 4
// (remainder pixels are dropped by the /4).
static export void CompressBlocksBC1_ispc(uniform rgba_surface src[], uniform uint8 dst[])
{
    for (uniform int yy = 0; yy<src->height/4; yy++)
    foreach (xx = 0 ... src->width/4)
    {
        CompressBlockBC1(src, xx, yy, dst);
    }
}
// Entry point: compress the whole surface to BC3; same parallelization
// scheme as CompressBlocksBC1_ispc.
static export void CompressBlocksBC3_ispc(uniform rgba_surface src[], uniform uint8 dst[])
{
    for (uniform int yy = 0; yy<src->height/4; yy++)
    foreach (xx = 0 ... src->width/4)
    {
        CompressBlockBC3(src, xx, yy, dst);
    }
}
///////////////////////////////////////////////////////////
// BC7 encoding
// Host-provided BC7 encoder settings; mirrored into bc7_enc_state's
// uniform fields.  (The "Treshold" spelling is part of the interface.)
struct bc7_enc_settings
{
    bool mode_selection[4];       // which mode groups to try -- TODO confirm grouping
    int refineIterations[8];      // per-mode refinement iteration counts
    bool skip_mode2;
    int fastSkipTreshold_mode1;   // partition-candidate cutoffs for early skip
    int fastSkipTreshold_mode3;
    int fastSkipTreshold_mode7;
    int mode45_channel0;
    int refineIterations_channel;
    int channels;                 // presumably 3 = RGB, 4 = RGBA; verify against callers
};
// Per-block encoder state: the input texels, the best encoding found so
// far across all tried modes, and a copy of the settings (uniform fields
// are shared by all SIMD lanes).
struct bc7_enc_state
{
float block[64]; // input texels, planar: R[0..15], G[16..31], B[32..47], A[48..63]
float opaque_err; // error for coding alpha=255
float best_err; // smallest total error found so far
uint32 best_data[5]; // 4, +1 margin for skips
// settings (mirrors bc7_enc_settings; see that struct for field meanings)
uniform bool mode_selection[4];
uniform int refineIterations[8];
uniform bool skip_mode2;
uniform int fastSkipTreshold_mode1;
uniform int fastSkipTreshold_mode3;
uniform int fastSkipTreshold_mode7;
uniform int mode45_channel0;
uniform int refineIterations_channel;
uniform int channels;
};
// Best mode 4/5 candidate: quantized RGB endpoints and indices, the
// separately-coded scalar channel's endpoints and indices, plus which
// channel was rotated into the alpha slot and the index-swap flag.
struct mode45_parameters
{
int qep[8]; // quantized color endpoints (2 endpoints x 4 slots)
uint32 qblock[2]; // packed color indices, 4 bits per texel
int aqep[2]; // quantized scalar-channel endpoints
uint32 aqblock[2]; // packed scalar-channel indices
int rotation; // rotated channel index (see bc7_enc_mode45_candidate)
int swap; // mode 4 index-width swap flag
};
// Bit-packing back-ends; declared here, defined elsewhere in this file.
void bc7_code_mode01237(uint32 data[5], int qep[6], uint32 qblock[2], int part_id, uniform int mode);
void bc7_code_mode45(uint32 data[5], mode45_parameters params[], uniform int mode);
void bc7_code_mode6(uint32 data[5], int qep[8], uint32 qblock[2]);
  549. ///////////////////////////
  550. // BC7 format data
// Return the BC7 interpolation-weight table for 2-, 3- or 4-bit indices.
// Weights are in 1/64 units and are used by the decoder reconstruction in
// block_quant / channel_opt_quant.
inline uniform const int* uniform get_unquant_table(uniform int bits)
{
assert(bits>=2 && bits<=4); // invalid bit size
static uniform const int unquant_table_2bits[] = { 0, 21, 43, 64 };
static uniform const int unquant_table_3bits[] = { 0, 9, 18, 27, 37, 46, 55, 64 };
static uniform const int unquant_table_4bits[] = { 0, 4, 9, 13, 17, 21, 26, 30, 34, 38, 43, 47, 51, 55, 60, 64 };
uniform const int* uniform unquant_tables[] = {unquant_table_2bits, unquant_table_3bits, unquant_table_4bits};
return unquant_tables[bits-2];
}
// Return the subset assignment for a BC7 partition, packed 2 bits per
// texel. Entries 0-63 are the 2-subset partitions, 64-127 the 3-subset
// ones. part_id is varying, so gather_uint does a per-lane table lookup.
inline uint32 get_pattern(int part_id)
{
static uniform const uint32 pattern_table[] = {
0x50505050u, 0x40404040u, 0x54545454u, 0x54505040u, 0x50404000u, 0x55545450u, 0x55545040u, 0x54504000u,
0x50400000u, 0x55555450u, 0x55544000u, 0x54400000u, 0x55555440u, 0x55550000u, 0x55555500u, 0x55000000u,
0x55150100u, 0x00004054u, 0x15010000u, 0x00405054u, 0x00004050u, 0x15050100u, 0x05010000u, 0x40505054u,
0x00404050u, 0x05010100u, 0x14141414u, 0x05141450u, 0x01155440u, 0x00555500u, 0x15014054u, 0x05414150u,
0x44444444u, 0x55005500u, 0x11441144u, 0x05055050u, 0x05500550u, 0x11114444u, 0x41144114u, 0x44111144u,
0x15055054u, 0x01055040u, 0x05041050u, 0x05455150u, 0x14414114u, 0x50050550u, 0x41411414u, 0x00141400u,
0x00041504u, 0x00105410u, 0x10541000u, 0x04150400u, 0x50410514u, 0x41051450u, 0x05415014u, 0x14054150u,
0x41050514u, 0x41505014u, 0x40011554u, 0x54150140u, 0x50505500u, 0x00555050u, 0x15151010u, 0x54540404u,
0xAA685050u, 0x6A5A5040u, 0x5A5A4200u, 0x5450A0A8u, 0xA5A50000u, 0xA0A05050u, 0x5555A0A0u, 0x5A5A5050u,
0xAA550000u, 0xAA555500u, 0xAAAA5500u, 0x90909090u, 0x94949494u, 0xA4A4A4A4u, 0xA9A59450u, 0x2A0A4250u,
0xA5945040u, 0x0A425054u, 0xA5A5A500u, 0x55A0A0A0u, 0xA8A85454u, 0x6A6A4040u, 0xA4A45000u, 0x1A1A0500u,
0x0050A4A4u, 0xAAA59090u, 0x14696914u, 0x69691400u, 0xA08585A0u, 0xAA821414u, 0x50A4A450u, 0x6A5A0200u,
0xA9A58000u, 0x5090A0A8u, 0xA8A09050u, 0x24242424u, 0x00AA5500u, 0x24924924u, 0x24499224u, 0x50A50A50u,
0x500AA550u, 0xAAAA4444u, 0x66660000u, 0xA5A0A5A0u, 0x50A050A0u, 0x69286928u, 0x44AAAA44u, 0x66666600u,
0xAA444444u, 0x54A854A8u, 0x95809580u, 0x96969600u, 0xA85454A8u, 0x80959580u, 0xAA141414u, 0x96960000u,
0xAAAA1414u, 0xA05050A0u, 0xA0A5A5A0u, 0x96000000u, 0x40804080u, 0xA9A8A9A8u, 0xAAAAAA44u, 0x2A4A5254u
};
return gather_uint(pattern_table, part_id);
}
// Return a 16-bit texel mask selecting the texels of subset j of the given
// partition. The table packs two masks per entry: low 16 bits = subset 0,
// high 16 bits = subset 1; subset 2 (3-subset partitions) is whatever is
// in neither of the packed masks.
inline int get_pattern_mask(int part_id, int j)
{
static uniform const uint32 pattern_mask_table[] = {
0xCCCC3333u, 0x88887777u, 0xEEEE1111u, 0xECC81337u, 0xC880377Fu, 0xFEEC0113u, 0xFEC80137u, 0xEC80137Fu,
0xC80037FFu, 0xFFEC0013u, 0xFE80017Fu, 0xE80017FFu, 0xFFE80017u, 0xFF0000FFu, 0xFFF0000Fu, 0xF0000FFFu,
0xF71008EFu, 0x008EFF71u, 0x71008EFFu, 0x08CEF731u, 0x008CFF73u, 0x73108CEFu, 0x3100CEFFu, 0x8CCE7331u,
0x088CF773u, 0x3110CEEFu, 0x66669999u, 0x366CC993u, 0x17E8E817u, 0x0FF0F00Fu, 0x718E8E71u, 0x399CC663u,
0xAAAA5555u, 0xF0F00F0Fu, 0x5A5AA5A5u, 0x33CCCC33u, 0x3C3CC3C3u, 0x55AAAA55u, 0x96966969u, 0xA55A5AA5u,
0x73CE8C31u, 0x13C8EC37u, 0x324CCDB3u, 0x3BDCC423u, 0x69969669u, 0xC33C3CC3u, 0x99666699u, 0x0660F99Fu,
0x0272FD8Du, 0x04E4FB1Bu, 0x4E40B1BFu, 0x2720D8DFu, 0xC93636C9u, 0x936C6C93u, 0x39C6C639u, 0x639C9C63u,
0x93366CC9u, 0x9CC66339u, 0x817E7E81u, 0xE71818E7u, 0xCCF0330Fu, 0x0FCCF033u, 0x774488BBu, 0xEE2211DDu,
0x08CC0133u, 0x8CC80037u, 0xCC80006Fu, 0xEC001331u, 0x330000FFu, 0x00CC3333u, 0xFF000033u, 0xCCCC0033u,
0x0F0000FFu, 0x0FF0000Fu, 0x00F0000Fu, 0x44443333u, 0x66661111u, 0x22221111u, 0x136C0013u, 0x008C8C63u,
0x36C80137u, 0x08CEC631u, 0x3330000Fu, 0xF0000333u, 0x00EE1111u, 0x88880077u, 0x22C0113Fu, 0x443088CFu,
0x0C22F311u, 0x03440033u, 0x69969009u, 0x9960009Fu, 0x03303443u, 0x00660699u, 0xC22C3113u, 0x8C0000EFu,
0x1300007Fu, 0xC4003331u, 0x004C1333u, 0x22229999u, 0x00F0F00Fu, 0x24929249u, 0x29429429u, 0xC30C30C3u,
0xC03C3C03u, 0x00AA0055u, 0xAA0000FFu, 0x30300303u, 0xC0C03333u, 0x90900909u, 0xA00A5005u, 0xAAA0000Fu,
0x0AAA0555u, 0xE0E01111u, 0x70700707u, 0x6660000Fu, 0x0EE01111u, 0x07707007u, 0x06660999u, 0x660000FFu,
0x00660099u, 0x0CC03333u, 0x03303003u, 0x60000FFFu, 0x80807777u, 0x10100101u, 0x000A0005u, 0x08CE8421u
};
uint32 mask_packed = gather_uint(pattern_mask_table, part_id);
int mask0 = mask_packed&0xFFFF;
int mask1 = mask_packed>>16;
// subset 2 = texels in neither subset 0 nor subset 1
int mask = (j==2) ? (~mask0)&(~mask1) : ( (j==0) ? mask0 : mask1 );
return mask;
}
// Fetch the anchor ("skip") texel positions of each subset for a partition.
// Each table entry packs two positions in nibbles: high nibble for subset 1,
// low nibble for subset 2; subset 0's anchor is always texel 0.
inline void get_skips(int skips[3], int part_id)
{
static uniform const int skip_table[] = {
0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u,
0xf0u, 0x20u, 0x80u, 0x20u, 0x20u, 0x80u, 0x80u, 0xf0u, 0x20u, 0x80u, 0x20u, 0x20u, 0x80u, 0x80u, 0x20u, 0x20u,
0xf0u, 0xf0u, 0x60u, 0x80u, 0x20u, 0x80u, 0xf0u, 0xf0u, 0x20u, 0x80u, 0x20u, 0x20u, 0x20u, 0xf0u, 0xf0u, 0x60u,
0x60u, 0x20u, 0x60u, 0x80u, 0xf0u, 0xf0u, 0x20u, 0x20u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0x20u, 0x20u, 0xf0u,
0x3fu, 0x38u, 0xf8u, 0xf3u, 0x8fu, 0x3fu, 0xf3u, 0xf8u, 0x8fu, 0x8fu, 0x6fu, 0x6fu, 0x6fu, 0x5fu, 0x3fu, 0x38u,
0x3fu, 0x38u, 0x8fu, 0xf3u, 0x3fu, 0x38u, 0x6fu, 0xa8u, 0x53u, 0x8fu, 0x86u, 0x6au, 0x8fu, 0x5fu, 0xfau, 0xf8u,
0x8fu, 0xf3u, 0x3fu, 0x5au, 0x6au, 0xa8u, 0x89u, 0xfau, 0xf6u, 0x3fu, 0xf8u, 0x5fu, 0xf3u, 0xf6u, 0xf6u, 0xf8u,
0x3fu, 0xf3u, 0x5fu, 0x5fu, 0x5fu, 0x8fu, 0x5fu, 0xafu, 0x5fu, 0xafu, 0x8fu, 0xdfu, 0xf3u, 0xcfu, 0x3fu, 0x38u
};
int skip_packed = gather_int(skip_table, part_id);
skips[0] = 0;
skips[1] = skip_packed>>4;
skips[2] = skip_packed&15;
}
  625. ///////////////////////////
  626. // PCA helpers
// Accumulate first and second moments of the texels selected by `mask`
// (bit k selects texel k). Layout of stats[15]:
//   [0..9]  upper-triangular sums of channel products
//           (rr, rg, rb, ra, gg, gb, ga, bb, ba, aa)
//   [10..13] per-channel sums
//   [14]    selected-texel count
// Deliberately branchless: every texel is processed and multiplied by a
// 0/1 flag, keeping all SIMD lanes converged.
inline void compute_stats_masked(float stats[15], float block[64], int mask, uniform int channels)
{
for (uniform int i=0; i<15; i++) stats[i] = 0;
int mask_shifted = mask<<1;
for (uniform int k=0; k<16; k++)
{
mask_shifted >>= 1;
//if ((mask_shifted&1) == 0) continue;
int flag = (mask_shifted&1); // 1 if texel k is selected, else 0
float rgba[4];
for (uniform int p=0; p<channels; p++) rgba[p] = block[k+p*16];
for (uniform int p=0; p<channels; p++) rgba[p] *= flag; // zero out unselected texels
stats[14] += flag;
stats[10] += rgba[0];
stats[11] += rgba[1];
stats[12] += rgba[2];
stats[0] += rgba[0]*rgba[0];
stats[1] += rgba[0]*rgba[1];
stats[2] += rgba[0]*rgba[2];
stats[4] += rgba[1]*rgba[1];
stats[5] += rgba[1]*rgba[2];
stats[7] += rgba[2]*rgba[2];
if (channels==4)
{
stats[13] += rgba[3];
stats[3] += rgba[0]*rgba[3];
stats[6] += rgba[1]*rgba[3];
stats[8] += rgba[2]*rgba[3];
stats[9] += rgba[3]*rgba[3];
}
}
}
// Convert raw moment sums into co-moment sums (N times the covariance):
// covar[i] = sum(x*y) - sum(x)*sum(y)/N, stored upper-triangular in the
// same order as stats[0..9]. Alpha entries only when channels == 4.
inline void covar_from_stats(float covar[10], float stats[15], uniform int channels)
{
covar[0] = stats[0] - stats[10+0]*stats[10+0]/stats[14];
covar[1] = stats[1] - stats[10+0]*stats[10+1]/stats[14];
covar[2] = stats[2] - stats[10+0]*stats[10+2]/stats[14];
covar[4] = stats[4] - stats[10+1]*stats[10+1]/stats[14];
covar[5] = stats[5] - stats[10+1]*stats[10+2]/stats[14];
covar[7] = stats[7] - stats[10+2]*stats[10+2]/stats[14];
if (channels == 4)
{
covar[3] = stats[3] - stats[10+0]*stats[10+3]/stats[14];
covar[6] = stats[6] - stats[10+1]*stats[10+3]/stats[14];
covar[8] = stats[8] - stats[10+2]*stats[10+3]/stats[14];
covar[9] = stats[9] - stats[10+3]*stats[10+3]/stats[14];
}
}
  675. inline void compute_covar_dc_masked(float covar[6], float dc[3], float block[64], int mask, uniform int channels)
  676. {
  677. float stats[15];
  678. compute_stats_masked(stats, block, mask, channels);
  679. covar_from_stats(covar, stats, channels);
  680. for (uniform int p=0; p<channels; p++) dc[p] = stats[10+p]/stats[14];
  681. }
// Compute the principal axis of the masked texels via power iteration on
// the (scaled, regularized) covariance matrix; also returns the mean
// color in dc. The axis is what block_segment_core projects texels onto.
void block_pca_axis(float axis[4], float dc[4], float block[64], int mask, uniform int channels)
{
uniform const int powerIterations = 8; // 4 not enough for HQ
float covar[10];
compute_covar_dc_masked(covar, dc, block, mask, channels);
//float var = covar[0] + covar[4] + covar[7] + covar[9] + 256;
float inv_var = 1.0 / (256 * 256); // rescale so the iteration stays well-conditioned
for (uniform int k = 0; k < 10; k++)
{
covar[k] *= inv_var;
}
// regularize the diagonal so a degenerate (constant-color) block still
// produces a usable axis
float eps = sq(0.001);
covar[0] += eps;
covar[4] += eps;
covar[7] += eps;
covar[9] += eps;
compute_axis(axis, covar, powerIterations, channels);
}
// Fit a line segment to the masked texels: project each selected texel
// onto the PCA axis, take the extreme projections, and map them back to
// two endpoint colors (ep[0..3] and ep[4..7]). Endpoints are NOT clamped
// to [0,255] here; block_segment does that.
void block_segment_core(float ep[], float block[64], int mask, uniform int channels)
{
float axis[4];
float dc[4];
block_pca_axis(axis, dc, block, mask, channels);
float ext[2];
ext[0] = +1e99;
ext[1] = -1e99;
// find min/max projection along the axis
int mask_shifted = mask<<1;
for (uniform int k=0; k<16; k++)
{
mask_shifted >>= 1;
if ((mask_shifted&1) == 0) continue; // texel not in this subset
float dot = 0;
for (uniform int p=0; p<channels; p++)
dot += axis[p]*(block[16*p+k]-dc[p]);
ext[0] = min(ext[0], dot);
ext[1] = max(ext[1], dot);
}
// create some distance if the endpoints collapse
if (ext[1]-ext[0] < 1f)
{
ext[0] -= 0.5f;
ext[1] += 0.5f;
}
// reconstruct endpoint colors: mean + extreme-projection * axis
for (uniform int i=0; i<2; i++)
for (uniform int p=0; p<channels; p++)
{
ep[4*i+p] = ext[i]*axis[p]+dc[p];
}
}
  732. void block_segment(float ep[], float block[64], int mask, uniform int channels)
  733. {
  734. block_segment_core(ep, block, mask, channels);
  735. for (uniform int i=0; i<2; i++)
  736. for (uniform int p=0; p<channels; p++)
  737. {
  738. ep[4*i+p] = clamp(ep[4*i+p], 0, 255);
  739. }
  740. }
// Estimate the residual error left after removing the principal component:
// trace(covar) minus the dominant eigenvalue (approximated by a short
// power iteration). Used only for ranking partitions, so a rough estimate
// is fine. Note: covar is modified in place (scaled and regularized).
float get_pca_bound(float covar[10], uniform int channels)
{
uniform const int powerIterations = 4; // quite approximative, but enough for bounding
float inv_var = 1.0 / (256 * 256);
for (uniform int k = 0; k < 10; k++)
{
covar[k] *= inv_var;
}
float eps = sq(0.001);
covar[0] += eps;
covar[4] += eps;
covar[7] += eps;
float axis[4];
compute_axis(axis, covar, powerIterations, channels);
// lambda = |covar * axis| approximates the dominant eigenvalue
float vec[4];
if (channels == 3) ssymv3(vec, covar, axis);
if (channels == 4) ssymv4(vec, covar, axis);
float sq_sum = 0f;
for (uniform int p=0; p<channels; p++) sq_sum += sq(vec[p]);
float lambda = sqrt(sq_sum);
float bound = covar[0]+covar[4]+covar[7]; // trace = total variance
if (channels == 4) bound += covar[9];
bound -= lambda;
bound = max(bound, 0.0); // numerical error can make it slightly negative
return bound;
}
  767. float block_pca_bound(float block[64], int mask, uniform int channels)
  768. {
  769. float stats[15];
  770. compute_stats_masked(stats, block, mask, channels);
  771. float covar[10];
  772. covar_from_stats(covar, stats, channels);
  773. return get_pca_bound(covar, channels);
  774. }
// Cheap estimate of how well a 2-way split works: bound the PCA residual
// of the masked subset plus that of its complement (derived by subtracting
// the subset's stats from the precomputed full-block stats). Used to rank
// partitions before running the full encoder on only the best few.
// Returns sqrt of the summed bounds, rescaled back to 0..255 units.
float block_pca_bound_split(float block[64], int mask, float full_stats[15], uniform int channels)
{
float stats[15];
compute_stats_masked(stats, block, mask, channels);
float covar1[10];
covar_from_stats(covar1, stats, channels);
// complement subset: full-block moments minus the masked subset's moments
for (uniform int i=0; i<15; i++)
stats[i] = full_stats[i] - stats[i];
float covar2[10];
covar_from_stats(covar2, stats, channels);
float bound = 0f;
bound += get_pca_bound(covar1, channels);
bound += get_pca_bound(covar2, channels);
return sqrt(bound)*256; // undo the 1/256^2 scaling applied inside get_pca_bound
}
  790. ///////////////////////////
  791. // endpoint quantization
  792. inline int unpack_to_byte(int v, uniform const int bits)
  793. {
  794. assert(bits >= 4);
  795. int vv = v<<(8-bits);
  796. return vv + shift_right(vv, bits);
  797. }
// Quantize one endpoint pair for modes 0/3/6/7, which carry a shared
// p-bit (extra shared LSB): for each parity b in {0,1}, snap every channel
// to the nearest code of that parity, then keep whichever parity gives the
// lower total error for this endpoint.
void ep_quant0367(int qep[], float ep[], uniform int mode, uniform int channels)
{
uniform int bits = 7; // modes 3 and 6 store 7 explicit bits
if (mode == 0) bits = 4;
if (mode == 7) bits = 5;
uniform int levels = 1 << bits;
uniform int levels2 = levels*2-1; // effective levels including the p-bit
for (uniform int i=0; i<2; i++)
{
int qep_b[8]; // candidate quantizations: [0..3] parity 0, [4..7] parity 1
for (uniform int b=0; b<2; b++)
for (uniform int p=0; p<4; p++)
{
// nearest code point with parity b
int v = (int)((ep[i*4+p]/255f*levels2-b)/2+0.5)*2+b;
qep_b[b*4+p] = clamp(v, b, levels2-1+b);
}
float ep_b[8];
for (uniform int j=0; j<8; j++)
ep_b[j] = qep_b[j];
if (mode==0)
for (uniform int j=0; j<8; j++)
ep_b[j] = unpack_to_byte(qep_b[j], 5); // mode 0: 4 bits + p-bit = 5 stored bits
float err0 = 0f;
float err1 = 0f;
for (uniform int p=0; p<channels; p++)
{
err0 += sq(ep[i*4+p]-ep_b[0+p]);
err1 += sq(ep[i*4+p]-ep_b[4+p]);
}
if(channels==4 && ep[i*4+3]<=0.5f)err0=-1; // ESENTHEL CHANGED, BC7 allows to encode end points in 2 quantized modes, #1 standard, #2 add "0.5*levels" to all channels (1 extra bit precision, however it affects all channels at the same time, so if we have alpha=0, but RGB channels have smaller error with the extra 0.5 value, then alpha would get the +0.5 too, and it could destroy complete transparency, so this code always forces #1 version if we have alpha=0)
for (uniform int p=0; p<4; p++)
qep[i*4+p] = (err0<err1) ? qep_b[0+p] : qep_b[4+p];
}
}
// Quantize one endpoint pair for mode 1: a single p-bit is shared by both
// endpoints of the pair, so both parities are tried jointly across all 8
// values and the better one is kept (error measured over RGB only).
// NOTE(review): the `mode` parameter is unused here — presumably kept for
// signature symmetry with the other ep_quant* routines.
void ep_quant1(int qep[], float ep[], uniform int mode)
{
int qep_b[16]; // [0..7] parity 0 candidates, [8..15] parity 1 candidates
for (uniform int b=0; b<2; b++)
for (uniform int i=0; i<8; i++)
{
int v = ((int)((ep[i]/255f*127f-b)/2+0.5))*2+b;
qep_b[b*8+i] = clamp(v, b, 126+b);
}
// dequant
float ep_b[16];
for (uniform int k=0; k<16; k++)
ep_b[k] = unpack_to_byte(qep_b[k], 7);
float err0 = 0f;
float err1 = 0f;
for (uniform int j = 0; j < 2; j++)
for (uniform int p = 0; p < 3; p++)
{
err0 += sq(ep[j * 4 + p] - ep_b[0 + j * 4 + p]);
err1 += sq(ep[j * 4 + p] - ep_b[8 + j * 4 + p]);
}
for (uniform int i=0; i<8; i++)
qep[i] = (err0<err1) ? qep_b[0+i] : qep_b[8+i];
}
  856. void ep_quant245(int qep[], float ep[], uniform int mode)
  857. {
  858. uniform int bits = 5;
  859. if (mode == 5) bits = 7;
  860. uniform int levels = 1 << bits;
  861. for (uniform int i=0; i<8; i++)
  862. {
  863. int v = ((int)(ep[i]/255f*(levels-1)+0.5));
  864. qep[i] = clamp(v, 0, levels-1);
  865. }
  866. }
  867. void ep_quant(int qep[], float ep[], uniform int mode, uniform int channels)
  868. {
  869. assert(mode <= 7);
  870. static uniform const int pairs_table[] = {3,2,3,2,1,1,1,2};
  871. uniform const int pairs = pairs_table[mode];
  872. if (mode == 0 || mode == 3 || mode == 6 || mode == 7)
  873. {
  874. for (uniform int i=0; i<pairs; i++)
  875. ep_quant0367(&qep[i*8], &ep[i*8], mode, channels);
  876. }
  877. else if (mode == 1)
  878. {
  879. for (uniform int i=0; i<pairs; i++)
  880. ep_quant1(&qep[i*8], &ep[i*8], mode);
  881. }
  882. else if (mode == 2 || mode == 4 || mode == 5)
  883. {
  884. for (uniform int i=0; i<pairs; i++)
  885. ep_quant245(&qep[i*8], &ep[i*8], mode);
  886. }
  887. else
  888. assert(false);
  889. }
  890. void ep_dequant(float ep[], int qep[], uniform int mode)
  891. {
  892. assert(mode <= 7);
  893. static uniform const int pairs_table[] = {3,2,3,2,1,1,1,2};
  894. uniform const int pairs = pairs_table[mode];
  895. // mode 3, 6 are 8-bit
  896. if (mode == 3 || mode == 6)
  897. {
  898. for (uniform int i=0; i<8*pairs; i++)
  899. ep[i] = qep[i];
  900. }
  901. else if (mode == 1 || mode == 5)
  902. {
  903. for (uniform int i=0; i<8*pairs; i++)
  904. ep[i] = unpack_to_byte(qep[i], 7);
  905. }
  906. else if (mode == 0 || mode == 2 || mode == 4)
  907. {
  908. for (uniform int i=0; i<8*pairs; i++)
  909. ep[i] = unpack_to_byte(qep[i], 5);
  910. }
  911. else if (mode == 7)
  912. {
  913. for (uniform int i=0; i<8*pairs; i++)
  914. ep[i] = unpack_to_byte(qep[i], 6);
  915. }
  916. else
  917. assert(false);
  918. }
// Quantize the endpoints, then immediately dequantize them in place, so
// all subsequent error computations use exactly the values the decoder
// will reconstruct.
void ep_quant_dequant(int qep[], float ep[], uniform int mode, uniform int channels)
{
ep_quant(qep, ep, mode, channels);
ep_dequant(ep, qep, mode);
}
  924. ///////////////////////////
  925. // pixel quantization
// Pick the best index per texel for fixed endpoints. Each texel is
// projected onto its subset's endpoint segment, then the two nearest code
// points are evaluated using the real decoder reconstruction (weights in
// 1/64ths) and the better one kept. Indices are packed 4 bits per texel
// into qblock; returns the total squared error.
float block_quant(uint32 qblock[2], float block[64], uniform int bits, float ep[], uint32 pattern, uniform int channels)
{
float total_err = 0;
uniform const int* uniform unquant_table = get_unquant_table(bits);
int levels = 1 << bits;
// 64-bit qblock: 5% overhead in this function
for (uniform int k=0; k<2; k++) qblock[k] = 0;
int pattern_shifted = pattern;
for (uniform int k=0; k<16; k++)
{
int j = pattern_shifted&3; // subset of texel k
pattern_shifted >>= 2;
float proj = 0;
float div = 0;
for (uniform int p=0; p<channels; p++)
{
// ep holds 8 floats per subset (2 endpoints x 4 slots); j is varying, so gather
float ep_a = gather_float(ep, 8*j+0+p);
float ep_b = gather_float(ep, 8*j+4+p);
proj += (block[k+p*16]-ep_a)*(ep_b-ep_a);
div += sq(ep_b-ep_a);
}
proj /= div;
int q1 = (int)(proj*levels+0.5);
q1 = clamp(q1, 1, levels-1); // candidates are q1-1 and q1, so both stay in range
float err0 = 0;
float err1 = 0;
int w0 = gather_int(unquant_table, q1-1);
int w1 = gather_int(unquant_table, q1);
for (uniform int p=0; p<channels; p++)
{
float ep_a = gather_float(ep, 8*j+0+p);
float ep_b = gather_float(ep, 8*j+4+p);
// exact decoder reconstruction for both candidate indices
float dec_v0 = (int)(((64-w0)*ep_a + w0*ep_b + 32)/64);
float dec_v1 = (int)(((64-w1)*ep_a + w1*ep_b + 32)/64);
err0 += sq(dec_v0 - block[k+p*16]);
err1 += sq(dec_v1 - block[k+p*16]);
}
// NOTE(review): best_err is an int; exact as long as texel values are
// integer-valued (8-bit sources) — confirm if fed fractional data.
int best_err = err1;
int best_q = q1;
if (err0<err1)
{
best_err = err0;
best_q = q1-1;
}
assert(best_q>=0 && best_q<=levels-1);
qblock[k/8] += ((uint32)best_q) << 4*(k%8);
total_err += best_err;
}
return total_err;
}
  976. ///////////////////////////
  977. // LS endpoint refinement
  978. void opt_endpoints(float ep[], float block[64], uniform int bits, uint32 qblock[2], int mask, uniform int channels)
  979. {
  980. uniform int levels = 1 << bits;
  981. float Atb1[4] = {0,0,0,0};
  982. float sum_q = 0;
  983. float sum_qq = 0;
  984. float sum[5] = {0,0,0,0,0};
  985. int mask_shifted = mask<<1;
  986. for (uniform int k1=0; k1<2; k1++)
  987. {
  988. uint32 qbits_shifted = qblock[k1];
  989. for (uniform int k2=0; k2<8; k2++)
  990. {
  991. uniform int k = k1*8+k2;
  992. float q = (int)(qbits_shifted&15);
  993. qbits_shifted >>= 4;
  994. mask_shifted >>= 1;
  995. if ((mask_shifted&1) == 0) continue;
  996. int x = (levels-1)-q;
  997. int y = q;
  998. sum_q += q;
  999. sum_qq += q*q;
  1000. sum[4] += 1;
  1001. for (uniform int p=0; p<channels; p++) sum[p] += block[k+p*16];
  1002. for (uniform int p=0; p<channels; p++) Atb1[p] += x*block[k+p*16];
  1003. }
  1004. }
  1005. float Atb2[4];
  1006. for (uniform int p=0; p<channels; p++)
  1007. {
  1008. //sum[p] = dc[p]*16;
  1009. Atb2[p] = (levels-1)*sum[p]-Atb1[p];
  1010. }
  1011. float Cxx = sum[4]*sq(levels-1)-2*(levels-1)*sum_q+sum_qq;
  1012. float Cyy = sum_qq;
  1013. float Cxy = (levels-1)*sum_q-sum_qq;
  1014. float scale = (levels-1) / (Cxx*Cyy - Cxy*Cxy);
  1015. for (uniform int p=0; p<channels; p++)
  1016. {
  1017. ep[0+p] = (Atb1[p]*Cyy - Atb2[p]*Cxy)*scale;
  1018. ep[4+p] = (Atb2[p]*Cxx - Atb1[p]*Cxy)*scale;
  1019. //ep[0+p] = clamp(ep[0+p], 0, 255);
  1020. //ep[4+p] = clamp(ep[4+p], 0, 255);
  1021. }
  1022. if (abs(Cxx*Cyy - Cxy*Cxy) < 0.001)
  1023. {
  1024. // flatten
  1025. for (uniform int p=0; p<channels; p++)
  1026. {
  1027. ep[0+p] = sum[p]/sum[4];
  1028. ep[4+p] = ep[0+p];
  1029. }
  1030. }
  1031. }
  1032. //////////////////////////
  1033. // parameter estimation
  1034. float compute_opaque_err(float block[64], uniform int channels)
  1035. {
  1036. if (channels == 3) return 0;
  1037. float err = 0f;
  1038. for (uniform int k=0; k<16; k++)
  1039. {
  1040. err += sq(block[48+k]-255);
  1041. }
  1042. return err;
  1043. }
// Fast single-shot encode of one partitioning for the partitioned modes
// (0/1/2/3/7): PCA-segment each subset, quantize endpoints, quantize
// indices. No refinement; used to rank candidate partitions by error.
float bc7_enc_mode01237_part_fast(int qep[24], uint32 qblock[2], float block[64], int part_id, uniform int mode)
{
uint32 pattern = get_pattern(part_id);
uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3; // index precision
uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3; // subset count
uniform int channels = 3; if (mode == 7) channels = 4; // mode 7 carries alpha
float ep[24];
for (uniform int j=0; j<pairs; j++)
{
int mask = get_pattern_mask(part_id, j);
block_segment(&ep[j*8], block, mask, channels);
}
ep_quant_dequant(qep, ep, mode, channels);
float total_err = block_quant(qblock, block, bits, ep, pattern, channels);
return total_err;
}
// Full encoder for the partitioned modes (0/1/2/3/7): pick the best
// partition from part_list using the fast path, iteratively refine its
// endpoints (LS solve + requantization), and commit the result to state
// if it beats the current best across all modes.
void bc7_enc_mode01237(bc7_enc_state state[], uniform int mode, int part_list[], uniform int part_count)
{
if (part_count == 0) return;
uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3;
uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;
uniform int channels = 3; if (mode == 7) channels = 4;
int best_qep[24];
uint32 best_qblock[2];
int best_part_id = -1;
float best_err = 1e99;
for (uniform int part=0; part<part_count; part++)
{
int part_id = part_list[part]&63; // low 6 bits carry the partition id (see bc7_enc_mode13)
if (pairs == 3) part_id += 64; // 3-subset patterns live in the second half of the tables
int qep[24];
uint32 qblock[2];
float err = bc7_enc_mode01237_part_fast(qep, qblock, state->block, part_id, mode);
if (err<best_err)
{
for (uniform int i=0; i<8*pairs; i++) best_qep[i] = qep[i];
for (uniform int k=0; k<2; k++) best_qblock[k] = qblock[k];
best_part_id = part_id;
best_err = err;
}
}
// refine: re-solve endpoints for the winning partition's fixed indices,
// requantize, and keep the result only if it improves the error
uniform int refineIterations = state->refineIterations[mode];
for (uniform int _=0; _<refineIterations; _++)
{
float ep[24];
for (uniform int j=0; j<pairs; j++)
{
int mask = get_pattern_mask(best_part_id, j);
opt_endpoints(&ep[j*8], state->block, bits, best_qblock, mask, channels);
}
int qep[24];
uint32 qblock[2];
ep_quant_dequant(qep, ep, mode, channels);
uint32 pattern = get_pattern(best_part_id);
float err = block_quant(qblock, state->block, bits, ep, pattern, channels);
if (err<best_err)
{
for (uniform int i=0; i<8*pairs; i++) best_qep[i] = qep[i];
for (uniform int k=0; k<2; k++) best_qblock[k] = qblock[k];
best_err = err;
}
}
if (mode != 7) best_err += state->opaque_err; // take into account alpha channel
if (best_err<state->best_err)
{
state->best_err = best_err;
bc7_code_mode01237(state->best_data, best_qep, best_qblock, best_part_id, mode);
}
}
// Partial selection sort: after the call, the first partial_count entries
// of list hold its smallest values in ascending order; the rest are in
// unspecified order. Values are varying, so the swap goes through a
// per-lane scatter.
void partial_sort_list(int list[], uniform int length, uniform int partial_count)
{
for (uniform int k=0; k<partial_count; k++)
{
int best_idx = k;
int best_value = list[k];
for (uniform int i=k+1; i<length; i++)
{
if (best_value > list[i])
{
best_value = list[i];
best_idx = i;
}
}
// swap
scatter_int(list, best_idx, list[k]);
list[k] = best_value;
}
}
  1133. void bc7_enc_mode02(bc7_enc_state state[])
  1134. {
  1135. int part_list[64];
  1136. for (uniform int part=0; part<64; part++)
  1137. part_list[part] = part;
  1138. bc7_enc_mode01237(state, 0, part_list, 16);
  1139. if (!state->skip_mode2) bc7_enc_mode01237(state, 2, part_list, 64); // usually not worth the time
  1140. }
// Modes 1 and 3 (2-subset): rank all 64 partitions by a cheap PCA split
// bound, then run the full encoder only on the top-ranked few. The rank
// key packs `part + bound*64`, so sorting by value sorts by bound while
// keeping the partition id recoverable via &63.
void bc7_enc_mode13(bc7_enc_state state[])
{
if (state->fastSkipTreshold_mode1 == 0 && state->fastSkipTreshold_mode3 == 0) return;
float full_stats[15];
compute_stats_masked(full_stats, state->block, -1, 3); // moments of the whole block
int part_list[64];
for (uniform int part=0; part<64; part++)
{
int mask = get_pattern_mask(part+0, 0);
float bound12 = block_pca_bound_split(state->block, mask, full_stats, 3);
int bound = (int)(bound12);
part_list[part] = part+bound*64;
}
partial_sort_list(part_list, 64, max(state->fastSkipTreshold_mode1, state->fastSkipTreshold_mode3));
bc7_enc_mode01237(state, 1, part_list, state->fastSkipTreshold_mode1);
bc7_enc_mode01237(state, 3, part_list, state->fastSkipTreshold_mode3);
}
// Mode 7 (2-subset, with alpha): same partition-ranking scheme as
// bc7_enc_mode13, but the PCA bound includes the alpha channel when the
// input has one.
void bc7_enc_mode7(bc7_enc_state state[])
{
if (state->fastSkipTreshold_mode7 == 0) return;
float full_stats[15];
compute_stats_masked(full_stats, state->block, -1, state->channels);
int part_list[64];
for (uniform int part=0; part<64; part++)
{
int mask = get_pattern_mask(part+0, 0);
float bound12 = block_pca_bound_split(state->block, mask, full_stats, state->channels);
int bound = (int)(bound12);
part_list[part] = part+bound*64; // pack rank key; partition id recoverable via &63
}
partial_sort_list(part_list, 64, state->fastSkipTreshold_mode7);
bc7_enc_mode01237(state, 7, part_list, state->fastSkipTreshold_mode7);
}
  1174. void channel_quant_dequant(int qep[2], float ep[2], uniform int epbits)
  1175. {
  1176. int elevels = (1<<epbits);
  1177. for (uniform int i=0; i<2; i++)
  1178. {
  1179. int v = ((int)(ep[i]/255f*(elevels-1)+0.5));
  1180. qep[i] = clamp(v, 0, elevels-1);
  1181. ep[i] = unpack_to_byte(qep[i], epbits);
  1182. }
  1183. }
  1184. void channel_opt_endpoints(float ep[2], float block[16], uniform int bits, uint32 qblock[2])
  1185. {
  1186. uniform int levels = 1 << bits;
  1187. float Atb1 = 0;
  1188. float sum_q = 0;
  1189. float sum_qq = 0;
  1190. float sum = 0;
  1191. for (uniform int k1=0; k1<2; k1++)
  1192. {
  1193. uint32 qbits_shifted = qblock[k1];
  1194. for (uniform int k2=0; k2<8; k2++)
  1195. {
  1196. uniform int k = k1*8+k2;
  1197. float q = (int)(qbits_shifted&15);
  1198. qbits_shifted >>= 4;
  1199. int x = (levels-1)-q;
  1200. int y = q;
  1201. sum_q += q;
  1202. sum_qq += q*q;
  1203. sum += block[k];
  1204. Atb1 += x*block[k];
  1205. }
  1206. }
  1207. float Atb2 = (levels-1)*sum-Atb1;
  1208. float Cxx = 16*sq(levels-1)-2*(levels-1)*sum_q+sum_qq;
  1209. float Cyy = sum_qq;
  1210. float Cxy = (levels-1)*sum_q-sum_qq;
  1211. float scale = (levels-1) / (Cxx*Cyy - Cxy*Cxy);
  1212. ep[0] = (Atb1*Cyy - Atb2*Cxy)*scale;
  1213. ep[1] = (Atb2*Cxx - Atb1*Cxy)*scale;
  1214. ep[0] = clamp(ep[0], 0, 255);
  1215. ep[1] = clamp(ep[1], 0, 255);
  1216. if (abs(Cxx*Cyy - Cxy*Cxy) < 0.001)
  1217. {
  1218. ep[0] = sum/16;
  1219. ep[1] = ep[0];
  1220. }
  1221. }
// Index selection for a single scalar channel (the 1-D analogue of
// block_quant): project each value onto the [ep0, ep1] segment, evaluate
// the two nearest code points with decoder reconstruction, keep the
// better, and pack 4-bit indices into qblock. Returns total squared error.
float channel_opt_quant(uint32 qblock[2], float block[16], uniform int bits, float ep[])
{
uniform const int* uniform unquant_table = get_unquant_table(bits);
int levels = (1<<bits);
qblock[0] = 0;
qblock[1] = 0;
float total_err = 0;
for (uniform int k=0; k<16; k++)
{
float proj = (block[k]-ep[0])/(ep[1]-ep[0]+0.001f); // +0.001 avoids divide-by-zero for collapsed endpoints
int q1 = (int)(proj*levels+0.5);
q1 = clamp(q1, 1, levels-1); // candidates are q1-1 and q1, so both stay in range
float err0 = 0;
float err1 = 0;
int w0 = gather_int(unquant_table, q1-1);
int w1 = gather_int(unquant_table, q1);
// exact decoder reconstruction (weights in 1/64ths) for both candidates
float dec_v0 = (int)(((64-w0)*ep[0] + w0*ep[1] + 32)/64);
float dec_v1 = (int)(((64-w1)*ep[0] + w1*ep[1] + 32)/64);
err0 += sq(dec_v0 - block[k]);
err1 += sq(dec_v1 - block[k]);
// NOTE(review): best_err is an int; exact as long as texel values are
// integer-valued (8-bit sources) — confirm if fed fractional data.
int best_err = err1;
int best_q = q1;
if (err0<err1)
{
best_err = err0;
best_q = q1-1;
}
qblock[k/8] += ((uint32)best_q) << 4*(k%8);
total_err += best_err;
}
return total_err;
}
  1254. float opt_channel(bc7_enc_state state[], uint32 qblock[2], int qep[2], float block[16], uniform int bits, uniform int epbits)
  1255. {
  1256. float ep[2] = {255,0};
  1257. for (uniform int k=0; k<16; k++)
  1258. {
  1259. ep[0] = min(ep[0], block[k]);
  1260. ep[1] = max(ep[1], block[k]);
  1261. }
  1262. channel_quant_dequant(qep, ep, epbits);
  1263. float err = channel_opt_quant(qblock, block, bits, ep);
  1264. // refine
  1265. uniform const int refineIterations = state->refineIterations_channel;
  1266. for (uniform int i=0; i<refineIterations; i++)
  1267. {
  1268. channel_opt_endpoints(ep, block, bits, qblock);
  1269. channel_quant_dequant(qep, ep, epbits);
  1270. err = channel_opt_quant(qblock, block, bits, ep);
  1271. }
  1272. return err;
  1273. }
// Evaluate one mode 4/5 candidate: rotate the chosen channel into the
// alpha slot, encode the remaining RGB as an unpartitioned 3-channel
// block, encode the rotated channel separately as a scalar, and keep the
// candidate if its total error beats best_err.
// swap==1 selects mode 4's alternative index widths (3-bit color indices,
// 2-bit scalar indices).
void bc7_enc_mode45_candidate(bc7_enc_state state[], mode45_parameters best_candidate[],
float best_err[], uniform int mode, uniform int rotation, uniform int swap)
{
uniform int bits = 2; // color index bits
uniform int abits = 2; if (mode==4) abits = 3; // scalar-channel index bits
uniform int aepbits = 8; if (mode==4) aepbits = 6; // scalar-channel endpoint bits
if (swap==1) { bits = 3; abits = 2; } // (mode 4)
float block[48];
for (uniform int k=0; k<16; k++)
{
for (uniform int p=0; p<3; p++)
block[k+p*16] = state->block[k+p*16];
if (rotation < 3)
{
// apply channel rotation: the rotated channel's slot takes the alpha
// data (or opaque 255 for RGB-only input)
if (state->channels == 4) block[k+rotation*16] = state->block[k+3*16];
if (state->channels == 3) block[k+rotation*16] = 255;
}
}
float ep[8];
block_segment(ep, block, -1, 3); // -1 mask: all 16 texels, no partitions
int qep[8];
ep_quant_dequant(qep, ep, mode, 3);
uint32 qblock[2];
float err = block_quant(qblock, block, bits, ep, 0, 3);
// refine
uniform int refineIterations = state->refineIterations[mode];
for (uniform int i=0; i<refineIterations; i++)
{
opt_endpoints(ep, block, bits, qblock, -1, 3);
ep_quant_dequant(qep, ep, mode, 3);
err = block_quant(qblock, block, bits, ep, 0, 3);
}
// encoding selected channel
err += opt_channel(state, aqblock, aqep, &state->block[rotation*16], abits, aepbits);
if (err<*best_err)
{
// swap_* moves the arrays into the candidate (contents exchanged)
swap_ints(best_candidate->qep, qep, 8);
swap_uints(best_candidate->qblock, qblock, 2);
swap_ints(best_candidate->aqep, aqep, 2);
swap_uints(best_candidate->aqblock, aqblock, 2);
best_candidate->rotation = rotation;
best_candidate->swap = swap;
*best_err = err;
}
}
  1322. void bc7_enc_mode45(bc7_enc_state state[])
  1323. {
  1324. mode45_parameters best_candidate;
  1325. float best_err = state->best_err;
  1326. memset(&best_candidate, 0, sizeof(mode45_parameters));
  1327. uniform int channel0 = state->mode45_channel0;
  1328. for (uniform int p=channel0; p<state->channels; p++)
  1329. {
  1330. bc7_enc_mode45_candidate(state, &best_candidate, &best_err, 4, p, 0);
  1331. bc7_enc_mode45_candidate(state, &best_candidate, &best_err, 4, p, 1);
  1332. }
  1333. // mode 4
  1334. if (best_err<state->best_err)
  1335. {
  1336. state->best_err = best_err;
  1337. bc7_code_mode45(state->best_data, &best_candidate, 4);
  1338. }
  1339. for (uniform int p=channel0; p<state->channels; p++)
  1340. {
  1341. bc7_enc_mode45_candidate(state, &best_candidate, &best_err, 5, p, 0);
  1342. }
  1343. // mode 5
  1344. if (best_err<state->best_err)
  1345. {
  1346. state->best_err = best_err;
  1347. bc7_code_mode45(state->best_data, &best_candidate, 5);
  1348. }
  1349. }
  1350. void bc7_enc_mode6(bc7_enc_state state[])
  1351. {
  1352. uniform int mode = 6;
  1353. uniform int bits = 4;
  1354. float ep[8];
  1355. block_segment(ep, state->block, -1, state->channels);
  1356. if (state->channels == 3)
  1357. {
  1358. ep[3] = ep[7] = 255;
  1359. }
  1360. int qep[8];
  1361. ep_quant_dequant(qep, ep, mode, state->channels);
  1362. uint32 qblock[2];
  1363. float err = block_quant(qblock, state->block, bits, ep, 0, state->channels);
  1364. // refine
  1365. uniform int refineIterations = state->refineIterations[mode];
  1366. for (uniform int i=0; i<refineIterations; i++)
  1367. {
  1368. opt_endpoints(ep, state->block, bits, qblock, -1, state->channels);
  1369. ep_quant_dequant(qep, ep, mode, state->channels);
  1370. err = block_quant(qblock, state->block, bits, ep, 0, state->channels);
  1371. }
  1372. if (err<state->best_err)
  1373. {
  1374. state->best_err = err;
  1375. bc7_code_mode6(state->best_data, qep, qblock);
  1376. }
  1377. }
  1378. //////////////////////////
  1379. // BC7 bitstream coding
  1380. void bc7_code_apply_swap_mode456(int qep[], uniform int channels, uint32 qblock[2], uniform int bits)
  1381. {
  1382. uniform int levels = 1 << bits;
  1383. if ((qblock[0]&15)>=levels/2)
  1384. {
  1385. swap_ints(&qep[0], &qep[channels], channels);
  1386. for (uniform int k=0; k<2; k++)
  1387. qblock[k] = (uint32)(0x11111111*(levels-1)) - qblock[k];
  1388. }
  1389. assert((qblock[0]&15) < levels/2);
  1390. }
// For modes 0/1/2/3/7: the anchor index of each subset must have its MSB
// clear. For each subset whose anchor index lies in the upper half, swap that
// subset's endpoint pair and accumulate the subset's pixel mask into 'flips'
// so bc7_code_qblock can complement those indices while writing them.
int bc7_code_apply_swap_mode01237(int qep[], uint32 qblock[2], uniform int mode, int part_id)
{
uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3;
uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;
int flips = 0;
uniform int levels = 1 << bits;
// anchor (skip) pixel position for each subset of this partition
int skips[3];
get_skips(skips, part_id);
for (uniform int j=0; j<pairs; j++)
{
int k0 = skips[j];
//int q = (qblock[k0/8]>>((k0%8)*4))&15;
// extract the 4-bit index of pixel k0 via a varying-index gather
int q = ((gather_uint(qblock, k0>>3)<<(28-(k0&7)*4))>>28);
if (q>=levels/2)
{
swap_ints(&qep[8*j], &qep[8*j+4], 4);
uint32 pmask = get_pattern_mask(part_id, j);
flips |= pmask;
}
}
return flips;
}
// Append the low 'bits' bits of v to the bitstream in data[], LSB-first,
// advancing *pos. Values that straddle a 32-bit word boundary spill their
// remaining bits into the next word.
void put_bits(uint32 data[5], uniform int* uniform pos, uniform int bits, int v)
{
assert(v<pow2(bits));
data[*pos/32] |= ((uint32)v) << (*pos%32);
if (*pos%32+bits>32)
{
// spill the bits that did not fit into the next word
data[*pos/32+1] |= shift_right(v, 32-*pos%32);
}
*pos += bits;
}
// Delete one bit at position 'from' in the bitstream data[0..4] by shifting
// every higher bit down by one. Used to drop the implicit (always-zero) MSB
// of anchor-pixel indices. Only positions above bit 64+10 occur in practice
// (see the assert); the two branches handle 'from' in word 2 or word 3.
inline void data_shl_1bit_from(uint32 data[5], int from)
{
if (from < 96)
{
assert(from > 64+10);
uint32 shifted = (data[2]>>1) | (data[3]<<31);
// keep bits below 'from' in word 2, replace the rest with shifted bits
uint32 mask = (pow2(from-64)-1)>>1;
data[2] = (mask&data[2]) | (~mask&shifted);
data[3] = (data[3]>>1) | (data[4]<<31);
data[4] = data[4]>>1;
}
else if (from < 128)
{
uint32 shifted = (data[3]>>1) | (data[4]<<31);
uint32 mask = (pow2(from-96)-1)>>1;
data[3] = (mask&data[3]) | (~mask&shifted);
data[4] = data[4]>>1;
}
}
// Write the 16 packed 4-bit indices to the bitstream. Indices whose bit in
// 'flips' is set are complemented (their subset's endpoints were swapped).
// The first index is written with one bit less: its MSB is implicit, being
// guaranteed zero by the apply-swap pass. Extra anchor MSBs of later subsets
// are removed afterwards by bc7_code_adjust_skip_mode01237.
void bc7_code_qblock(uint32 data[5], uniform int* uniform pPos, uint32 qblock[2], uniform int bits, int flips)
{
uniform int levels = 1 << bits;
int flips_shifted = flips;
for (uniform int k1=0; k1<2; k1++)
{
uint32 qbits_shifted = qblock[k1];
for (uniform int k2=0; k2<8; k2++)
{
int q = qbits_shifted&15;
if ((flips_shifted&1)>0) q = (levels-1)-q;
if (k1==0 && k2==0) put_bits(data, pPos, bits-1, q);
else put_bits(data, pPos, bits , q);
qbits_shifted >>= 4;
flips_shifted >>= 1;
}
}
}
// After the indices are written, drop the implicit MSB of each non-first
// subset's anchor (skip) pixel by shifting the stream over that bit.
// Anchors are processed highest-position-first (hence the skips[1]/skips[2]
// swap) so earlier deletions do not move later positions.
void bc7_code_adjust_skip_mode01237(uint32 data[5], uniform int mode, int part_id)
{
uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3;
uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;
int skips[3];
get_skips(skips, part_id);
if (pairs>2 && skips[1] < skips[2])
{
int t = skips[1]; skips[1] = skips[2]; skips[2] = t;
}
for (uniform int j=1; j<pairs; j++)
{
int k = skips[j];
// bit position of anchor k's index MSB within the 128-bit block
data_shl_1bit_from(data, 128+(pairs-1)-(15-k)*bits);
}
}
// Pack a mode 0/1/2/3/7 block: mode prefix, partition id, endpoint MSBs,
// p-bits (shared endpoint LSBs), then the index plane.
void bc7_code_mode01237(uint32 data[5], int qep[], uint32 qblock[2], int part_id, uniform int mode)
{
uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3;
uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;
uniform int channels = 3; if (mode == 7) channels = 4;
// fix anchor indices first; remember which subsets need complemented indices
int flips = bc7_code_apply_swap_mode01237(qep, qblock, mode, part_id);
for (uniform int k=0; k<5; k++) data[k] = 0;
uniform int pos = 0;
// mode 0-3, 7: unary mode prefix (bit number 'mode' set)
put_bits(data, &pos, mode+1, 1<<mode);
// partition
if (mode==0)
{
put_bits(data, &pos, 4, part_id&15);
}
else
{
put_bits(data, &pos, 6, part_id&63);
}
// endpoints; modes with p-bits store each component without its LSB (>>1)
for (uniform int p=0; p<channels; p++)
for (uniform int j=0; j<pairs*2; j++)
{
if (mode == 0)
{
put_bits(data, &pos, 4, qep[j*4+0+p]>>1);
}
else if (mode == 1)
{
put_bits(data, &pos, 6, qep[j*4+0+p]>>1);
}
else if (mode == 2)
{
put_bits(data, &pos, 5, qep[j*4+0+p]);
}
else if (mode == 3)
{
put_bits(data, &pos, 7, qep[j*4+0+p]>>1);
}
else if (mode == 7)
{
put_bits(data, &pos, 5, qep[j*4+0+p]>>1);
}
else
{
assert(false);
}
}
// p bits
if (mode == 1)
for (uniform int j=0; j<2; j++)
{
// mode 1: one shared p-bit per subset
put_bits(data, &pos, 1, qep[j*8]&1);
}
if (mode == 0 || mode == 3 || mode == 7)
for (uniform int j=0; j<pairs*2; j++)
{
// one p-bit per endpoint
put_bits(data, &pos, 1, qep[j*4]&1);
}
// quantized values
bc7_code_qblock(data, &pos, qblock, bits, flips);
bc7_code_adjust_skip_mode01237(data, mode, part_id);
}
// Pack a mode 4/5 block. The candidate's parameters are moved out of *params
// destructively (swap_ints/swap_uints exchange contents). With 'swap' set
// (mode 4 only) the color and scalar index planes trade precision, so the
// planes and the endpoint sets they anchor are crossed over before coding.
void bc7_code_mode45(uint32 data[5], varying mode45_parameters* uniform params, uniform int mode)
{
int qep[8];
uint32 qblock[2];
int aqep[2];
uint32 aqblock[2];
swap_ints(params->qep, qep, 8);
swap_uints(params->qblock, qblock, 2);
swap_ints(params->aqep, aqep, 2);
swap_uints(params->aqblock, aqblock, 2);
int rotation = params->rotation;
int swap = params->swap;
uniform int bits = 2;                              // color index bits
uniform int abits = 2; if (mode==4) abits = 3;     // scalar index bits
uniform int epbits = 7; if (mode==4) epbits = 5;   // color endpoint bits
uniform int aepbits = 8; if (mode==4) aepbits = 6; // scalar endpoint bits
if (!swap)
{
bc7_code_apply_swap_mode456(qep, 4, qblock, bits);
bc7_code_apply_swap_mode456(aqep, 1, aqblock, abits);
}
else
{
// swapped precision: the scalar plane was quantized as the "color" plane
swap_uints(qblock, aqblock, 2);
bc7_code_apply_swap_mode456(aqep, 1, qblock, bits);
bc7_code_apply_swap_mode456(qep, 4, aqblock, abits);
}
for (uniform int k=0; k<5; k++) data[k] = 0;
uniform int pos = 0;
// mode 4-5: unary mode prefix
put_bits(data, &pos, mode+1, 1<<mode);
// rotation, stored biased by +1
//put_bits(data, &pos, 2, (rotation+1)%4);
put_bits(data, &pos, 2, (rotation+1)&3);
if (mode==4)
{
// index-swap flag (mode 4 only)
put_bits(data, &pos, 1, swap);
}
// endpoints
for (uniform int p=0; p<3; p++)
{
put_bits(data, &pos, epbits, qep[0+p]);
put_bits(data, &pos, epbits, qep[4+p]);
}
// alpha endpoints
put_bits(data, &pos, aepbits, aqep[0]);
put_bits(data, &pos, aepbits, aqep[1]);
// quantized values
bc7_code_qblock(data, &pos, qblock, bits, 0);
bc7_code_qblock(data, &pos, aqblock, abits, 0);
}
// Pack a mode 6 block: 7-bit mode prefix, endpoints without their p-bit LSB,
// the two p-bits, then the 4-bit index plane.
void bc7_code_mode6(uint32 data[5], int qep[8], uint32 qblock[2])
{
// ensure the anchor index MSB is zero (may swap endpoints / flip indices)
bc7_code_apply_swap_mode456(qep, 4, qblock, 4);
for (uniform int k=0; k<5; k++) data[k] = 0;
uniform int pos = 0;
// mode 6: prefix = bit 6 set (64), written LSB-first
put_bits(data, &pos, 7, 64);
// endpoints (LSB of each component is carried by the p-bit below)
for (uniform int p=0; p<4; p++)
{
put_bits(data, &pos, 7, qep[0+p]>>1);
put_bits(data, &pos, 7, qep[4+p]>>1);
}
// p bits
put_bits(data, &pos, 1, qep[0]&1);
put_bits(data, &pos, 1, qep[4]&1);
// quantized values
bc7_code_qblock(data, &pos, qblock, 4, 0);
}
  1609. //////////////////////////
  1610. // BC7 core
// Run every enabled BC7 mode encoder; each updates state->best_err/best_data
// when it beats the current best. mode_selection[1] intentionally gates both
// bc7_enc_mode13 and bc7_enc_mode7 (it covers modes 1, 3 and 7).
inline void CompressBlockBC7_core(bc7_enc_state state[])
{
if (state->mode_selection[0]) bc7_enc_mode02(state);
if (state->mode_selection[1]) bc7_enc_mode13(state);
if (state->mode_selection[1]) bc7_enc_mode7(state);
if (state->mode_selection[2]) bc7_enc_mode45(state);
if (state->mode_selection[3]) bc7_enc_mode6(state);
}
  1619. void bc7_enc_copy_settings(bc7_enc_state state[], uniform bc7_enc_settings settings[])
  1620. {
  1621. state->channels = settings->channels;
  1622. // mode02
  1623. state->mode_selection[0] = settings->mode_selection[0];
  1624. state->skip_mode2 = settings->skip_mode2;
  1625. state->refineIterations[0] = settings->refineIterations[0];
  1626. state->refineIterations[2] = settings->refineIterations[2];
  1627. // mode137
  1628. state->mode_selection[1] = settings->mode_selection[1];
  1629. state->fastSkipTreshold_mode1 = settings->fastSkipTreshold_mode1;
  1630. state->fastSkipTreshold_mode3 = settings->fastSkipTreshold_mode3;
  1631. state->fastSkipTreshold_mode7 = settings->fastSkipTreshold_mode7;
  1632. state->refineIterations[1] = settings->refineIterations[1];
  1633. state->refineIterations[3] = settings->refineIterations[3];
  1634. state->refineIterations[7] = settings->refineIterations[7];
  1635. // mode45
  1636. state->mode_selection[2] = settings->mode_selection[2];
  1637. state->mode45_channel0 = settings->mode45_channel0;
  1638. state->refineIterations_channel = settings->refineIterations_channel;
  1639. state->refineIterations[4] = settings->refineIterations[4];
  1640. state->refineIterations[5] = settings->refineIterations[5];
  1641. // mode6
  1642. state->mode_selection[3] = settings->mode_selection[3];
  1643. state->refineIterations[6] = settings->refineIterations[6];
  1644. }
// Compress one 4x4 block per SIMD lane: load texels, run the enabled mode
// encoders, and store the winning 128-bit block into dst.
inline void CompressBlockBC7(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[],
uniform bc7_enc_settings settings[])
{
bc7_enc_state _state;
// varying struct accessed through a uniform pointer
varying bc7_enc_state* uniform state = &_state;
bc7_enc_copy_settings(state, settings);
load_block_interleaved_rgba(state->block, src, xx, yy);
state->best_err = 1e99;
state->opaque_err = compute_opaque_err(state->block, state->channels);
CompressBlockBC7_core(state);
store_data(dst, src->width, xx, yy, state->best_data, 4);
}
// Exported entry point: compress the whole surface, one row of 4x4 blocks
// per outer iteration, with blocks spread across SIMD lanes via foreach.
static export void CompressBlocksBC7_ispc(uniform rgba_surface src[], uniform uint8 dst[], uniform bc7_enc_settings settings[])
{
for (uniform int yy = 0; yy<src->height/4; yy++)
foreach (xx = 0 ... src->width/4)
{
CompressBlockBC7(src, xx, yy, dst, settings);
}
}
  1665. ///////////////////////////////////////////////////////////
  1666. // BC6H encoding
// User-facing BC6H encoder settings.
struct bc6h_enc_settings
{
bool slow_mode;            // quality/speed preset flag (consumed by mode selection; callers not visible in this chunk)
bool fast_mode;            // quality/speed preset flag (see note above)
int refineIterations_1p;   // endpoint refinement passes for 1-subset modes
int refineIterations_2p;   // endpoint refinement passes for 2-subset modes
int fastSkipTreshold;      // number of ranked partitions fully evaluated
};
// Per-lane BC6H encoder working state.
struct bc6h_enc_state
{
float block[64];       // input texels, 16 values per channel (channel-deinterleaved)
float best_err;        // best total error found so far
uint32 best_data[5]; // 4, +1 margin for skips
float rgb_bounds[6];   // per-channel bounds: low in [0..2], high in [3..5]
float max_span;        // widest per-channel value range in the block
int max_span_idx;      // channel index owning max_span
int mode;              // BC6H mode id currently being encoded
int epb;               // endpoint precision (bits) for the current mode
int qbounds[8];        // quantized endpoint clamp range: low in [0..3], high in [4..7]
// settings (copied from bc6h_enc_settings)
uniform bool slow_mode;
uniform bool fast_mode;
uniform int refineIterations_1p;
uniform int refineIterations_2p;
uniform int fastSkipTreshold;
};
// Forward declarations: BC6H bitstream writers defined after the encoders.
void bc6h_code_2p(uint32 data[5], int pqep[], uint32 qblock[2], int part_id, int mode);
void bc6h_code_1p(uint32 data[5], int qep[8], uint32 qblock[2], int mode);
  1695. ///////////////////////////
  1696. // BC6H format data
// BC6H mode number -> mode prefix value written into the block header.
inline uniform int get_mode_prefix(uniform int mode)
{
static uniform const int mode_prefix_table[] =
{
0, 1, 2, 6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15
};
return mode_prefix_table[mode];
}
  1705. inline uniform float get_span(uniform int mode)
  1706. {
  1707. static uniform const float span_table[] =
  1708. {
  1709. 0.9 * 0xFFFF / 64, // (0) 4 / 10
  1710. 0.9 * 0xFFFF / 4, // (1) 5 / 7
  1711. 0.8 * 0xFFFF / 256, // (2) 3 / 11
  1712. -1, -1,
  1713. 0.9 * 0xFFFF / 32, // (5) 4 / 9
  1714. 0.9 * 0xFFFF / 16, // (6) 4 / 8
  1715. -1, -1,
  1716. 0xFFFF, // (9) absolute
  1717. 0xFFFF, // (10) absolute
  1718. 0.95 * 0xFFFF / 8, // (11) 8 / 11
  1719. 0.95 * 0xFFFF / 32, // (12) 7 / 12
  1720. 6, // (13) 3 / 16
  1721. };
  1722. uniform int span = span_table[mode];
  1723. assert(span > 0);
  1724. return span;
  1725. }
// Endpoint precision (bits per component) for each BC6H mode; -1 marks
// unused mode slots, guarded by the assert.
inline uniform int get_mode_bits(uniform int mode)
{
static uniform const int mode_bits_table[] =
{
10, 7, 11, -1, -1,
9, 8, -1, -1, 6,
10, 11, 12, 16,
};
uniform int mode_bits = mode_bits_table[mode];
assert(mode_bits > 0);
return mode_bits;
}
  1738. ///////////////////////////
  1739. // endpoint quantization
  1740. inline int unpack_to_uf16(uint32 v, int bits)
  1741. {
  1742. if (bits >= 15) return v;
  1743. if (v == 0) return 0;
  1744. if (v == (1<<bits)-1) return 0xFFFF;
  1745. return (v * 2 + 1) << (15-bits);
  1746. }
  1747. void ep_quant_bc6h(int qep[], float ep[], int bits, uniform int pairs)
  1748. {
  1749. int levels = 1 << bits;
  1750. for (uniform int i = 0; i < 8 * pairs; i++)
  1751. {
  1752. int v = ((int)(ep[i] / (256 * 256f - 1) * (levels - 1) + 0.5));
  1753. qep[i] = clamp(v, 0, levels - 1);
  1754. }
  1755. }
  1756. void ep_dequant_bc6h(float ep[], int qep[], int bits, uniform int pairs)
  1757. {
  1758. for (uniform int i = 0; i < 8 * pairs; i++)
  1759. ep[i] = unpack_to_uf16(qep[i], bits);
  1760. }
  1761. void ep_quant_dequant_bc6h(bc6h_enc_state state[], int qep[], float ep[], uniform int pairs)
  1762. {
  1763. int bits = state->epb;
  1764. ep_quant_bc6h(qep, ep, bits, pairs);
  1765. for (uniform int i = 0; i < 2 * pairs; i++)
  1766. for (uniform int p = 0; p < 3; p++)
  1767. {
  1768. qep[i * 4 + p] = clamp(qep[i * 4 + p], state->qbounds[p], state->qbounds[4 + p]);
  1769. }
  1770. ep_dequant_bc6h(ep, qep, bits, pairs);
  1771. }
  1772. //////////////////////////
  1773. // parameter estimation
// Estimate a two-subset encoding for one partition: fit endpoints per
// subset, quantize/clamp/dequantize them, and return the whole-block index
// quantization error. Outputs qep (2x8 endpoints) and the packed indices.
float bc6h_enc_2p_part_fast(bc6h_enc_state state[], int qep[16], uint32 qblock[2], int part_id)
{
uint32 pattern = get_pattern(part_id);
uniform int bits = 3;
uniform int pairs = 2;
uniform int channels = 3;
float ep[16];
for (uniform int j = 0; j<pairs; j++)
{
int mask = get_pattern_mask(part_id, j);
block_segment_core(&ep[j * 8], state->block, mask, channels);
}
ep_quant_dequant_bc6h(state, qep, ep, 2);
float total_err = block_quant(qblock, state->block, bits, ep, pattern, channels);
return total_err;
}
// Fully evaluate a ranked list of two-subset partitions, keep the best one,
// run endpoint refinement on the winner, and commit it to state->best_data
// if it improves on the current best encoding.
void bc6h_enc_2p_list(bc6h_enc_state state[], int part_list[], uniform int part_count)
{
if (part_count == 0) return;
uniform int bits = 3;
uniform int pairs = 2;
uniform int channels = 3;
int best_qep[24];
uint32 best_qblock[2];
int best_part_id = -1;
float best_err = 1e99;
for (uniform int part = 0; part<part_count; part++)
{
int part_id = part_list[part] & 31; // low 5 bits carry the partition id
int qep[24];
uint32 qblock[2];
float err = bc6h_enc_2p_part_fast(state, qep, qblock, part_id);
if (err<best_err)
{
for (uniform int i = 0; i<8 * pairs; i++) best_qep[i] = qep[i];
for (uniform int k = 0; k<2; k++) best_qblock[k] = qblock[k];
best_part_id = part_id;
best_err = err;
}
}
// refine: re-fit endpoints to the winning partition's indices
uniform int refineIterations = state->refineIterations_2p;
for (uniform int _ = 0; _<refineIterations; _++)
{
float ep[24];
for (uniform int j = 0; j<pairs; j++)
{
int mask = get_pattern_mask(best_part_id, j);
opt_endpoints(&ep[j * 8], state->block, bits, best_qblock, mask, channels);
}
int qep[24];
uint32 qblock[2];
ep_quant_dequant_bc6h(state, qep, ep, 2);
uint32 pattern = get_pattern(best_part_id);
float err = block_quant(qblock, state->block, bits, ep, pattern, channels);
if (err<best_err)
{
for (uniform int i = 0; i<8 * pairs; i++) best_qep[i] = qep[i];
for (uniform int k = 0; k<2; k++) best_qblock[k] = qblock[k];
best_err = err;
}
}
if (best_err<state->best_err)
{
state->best_err = best_err;
bc6h_code_2p(state->best_data, best_qep, best_qblock, best_part_id, state->mode);
}
}
// Rank all 32 two-subset partitions by a cheap PCA-based error bound, then
// fully evaluate only the most promising fastSkipTreshold of them.
void bc6h_enc_2p(bc6h_enc_state state[])
{
float full_stats[15];
compute_stats_masked(full_stats, state->block, -1, 3);
int part_list[32];
for (uniform int part = 0; part < 32; part++)
{
int mask = get_pattern_mask(part, 0);
float bound12 = block_pca_bound_split(state->block, mask, full_stats, 3);
int bound = (int)(bound12);
// pack partition id (low bits) with its bound (high bits) as the sort key
part_list[part] = part + bound * 64;
}
partial_sort_list(part_list, 32, state->fastSkipTreshold);
bc6h_enc_2p_list(state, part_list, state->fastSkipTreshold);
}
// Encode the block with a single-subset BC6H mode (4-bit indices), with
// optional endpoint refinement, and keep it if it beats the current best.
void bc6h_enc_1p(bc6h_enc_state state[])
{
float ep[8];
block_segment_core(ep, state->block, -1, 3);
int qep[8];
ep_quant_dequant_bc6h(state, qep, ep, 1);
uint32 qblock[2];
float err = block_quant(qblock, state->block, 4, ep, 0, 3);
// refine: re-fit endpoints to the chosen indices
uniform int refineIterations = state->refineIterations_1p;
for (uniform int i = 0; i<refineIterations; i++)
{
opt_endpoints(ep, state->block, 4, qblock, -1, 3);
ep_quant_dequant_bc6h(state, qep, ep, 1);
err = block_quant(qblock, state->block, 4, ep, 0, 3);
}
if (err < state->best_err)
{
state->best_err = err;
bc6h_code_1p(state->best_data, qep, qblock, state->mode);
}
}
  1879. inline void compute_qbounds(bc6h_enc_state state[], float rgb_span[3])
  1880. {
  1881. float bounds[8];
  1882. for (uniform int p = 0; p < 3; p++)
  1883. {
  1884. float middle = (state->rgb_bounds[p] + state->rgb_bounds[3 + p]) / 2;
  1885. bounds[ p] = middle - rgb_span[p] / 2;
  1886. bounds[4+p] = middle + rgb_span[p] / 2;
  1887. }
  1888. ep_quant_bc6h(state->qbounds, bounds, state->epb, 1);
  1889. }
  1890. void compute_qbounds(bc6h_enc_state state[], float span)
  1891. {
  1892. float rgb_span[3] = { span, span, span };
  1893. compute_qbounds(state, rgb_span);
  1894. }
  1895. void compute_qbounds2(bc6h_enc_state state[], float span, int max_span_idx)
  1896. {
  1897. float rgb_span[3] = { span, span, span };
  1898. for (uniform int p = 0; p < 3; p++)
  1899. {
  1900. rgb_span[p] *= (p == max_span_idx) ? 2 : 1;
  1901. }
  1902. compute_qbounds(state, rgb_span);
  1903. }
// Consider one BC6H mode: reject it if the block's endpoint span (times a
// safety margin) exceeds what the mode's delta encoding can represent;
// otherwise configure precision/bounds/mode id. 'enc' false = configure only
// (dry run), true = also run the encoder.
void bc6h_test_mode(bc6h_enc_state state[], uniform int mode, uniform bool enc, uniform float margin)
{
uniform int mode_bits = get_mode_bits(mode);
uniform float span = get_span(mode);
float max_span = state->max_span;
int max_span_idx = state->max_span_idx;
if (max_span * margin > span) return;
if (mode >= 10)
{
// single-subset modes
state->epb = mode_bits;
state->mode = mode;
compute_qbounds(state, span);
if (enc) bc6h_enc_1p(state);
}
else if (mode <= 1 || mode == 5 || mode == 9)
{
// two-subset modes with equal precision for all channels
state->epb = mode_bits;
state->mode = mode;
compute_qbounds(state, span);
if (enc) bc6h_enc_2p(state);
}
else
{
// two-subset modes 2-4 / 6-8: one channel gets extra delta bits;
// select the variant matching the widest channel
state->epb = mode_bits;
state->mode = mode + max_span_idx;
compute_qbounds2(state, span, max_span_idx);
if (enc) bc6h_enc_2p(state);
}
}
  1933. //////////////////////////
  1934. // BC6H bitstream coding
  1935. int bit_at(int v, uniform int pos)
  1936. {
  1937. return (v >> pos) & 1;
  1938. }
  1939. uint32 reverse_bits(uint32 v, uniform int bits)
  1940. {
  1941. if (bits == 2)
  1942. {
  1943. return (v >> 1) + (v & 1) * 2;
  1944. }
  1945. if (bits == 6)
  1946. {
  1947. v = (v & 0x5555) * 2 + ((v >> 1) & 0x5555);
  1948. return (v >> 4) + ((v >> 2) & 3) * 4 + (v & 3) * 16;
  1949. }
  1950. else
  1951. {
  1952. assert(false);
  1953. }
  1954. }
  1955. void bc6h_pack(uint32 packed[], int qep[], int mode)
  1956. {
  1957. if (mode == 0)
  1958. {
  1959. int pred_qep[16];
  1960. for (uniform int p = 0; p < 3; p++)
  1961. {
  1962. pred_qep[ p] = qep[p];
  1963. pred_qep[ 4 + p] = (qep[ 4 + p] - qep[p]) & 31;
  1964. pred_qep[ 8 + p] = (qep[ 8 + p] - qep[p]) & 31;
  1965. pred_qep[12 + p] = (qep[12 + p] - qep[p]) & 31;
  1966. }
  1967. for (uniform int i = 1; i < 4; i++)
  1968. for (uniform int p = 0; p < 3; p++)
  1969. {
  1970. assert( qep[i * 4 + p] - qep[p] <= 15);
  1971. assert(-16 <= qep[i * 4 + p] - qep[p]);
  1972. }
  1973. /*
  1974. g2[4], b2[4], b3[4],
  1975. r0[9:0],
  1976. g0[9:0],
  1977. b0[9:0],
  1978. r1[4:0], g3[4], g2[3:0],
  1979. g1[4:0], b3[0], g3[3:0],
  1980. b1[4:0], b3[1], b2[3:0],
  1981. r2[4:0], b3[2],
  1982. r3[4:0], b3[3]
  1983. */
  1984. uint32 pqep[10];
  1985. pqep[4] = pred_qep[4] + (pred_qep[ 8 + 1] & 15) * 64;
  1986. pqep[5] = pred_qep[5] + (pred_qep[12 + 1] & 15) * 64;
  1987. pqep[6] = pred_qep[6] + (pred_qep[ 8 + 2] & 15) * 64;
  1988. pqep[4] += bit_at(pred_qep[12 + 1], 4) << 5;
  1989. pqep[5] += bit_at(pred_qep[12 + 2], 0) << 5;
  1990. pqep[6] += bit_at(pred_qep[12 + 2], 1) << 5;
  1991. pqep[8] = pred_qep[ 8] + bit_at(pred_qep[12 + 2], 2) * 32;
  1992. pqep[9] = pred_qep[12] + bit_at(pred_qep[12 + 2], 3) * 32;
  1993. packed[0] = get_mode_prefix(0);
  1994. packed[0] += bit_at(pred_qep[ 8 + 1], 4) << 2;
  1995. packed[0] += bit_at(pred_qep[ 8 + 2], 4) << 3;
  1996. packed[0] += bit_at(pred_qep[12 + 2], 4) << 4;
  1997. packed[1] = (pred_qep[2] << 20) + (pred_qep[1] << 10) + pred_qep[0];
  1998. packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
  1999. packed[3] = (pqep[9] << 6) + pqep[8];
  2000. }
  2001. else if (mode == 1)
  2002. {
  2003. int pred_qep[16];
  2004. for (uniform int p = 0; p < 3; p++)
  2005. {
  2006. pred_qep[ p] = qep[p];
  2007. pred_qep[ 4 + p] = (qep[ 4 + p] - qep[p]) & 63;
  2008. pred_qep[ 8 + p] = (qep[ 8 + p] - qep[p]) & 63;
  2009. pred_qep[12 + p] = (qep[12 + p] - qep[p]) & 63;
  2010. }
  2011. for (uniform int i = 1; i < 4; i++)
  2012. for (uniform int p = 0; p < 3; p++)
  2013. {
  2014. assert( qep[i * 4 + p] - qep[p] <= 31);
  2015. assert(-32 <= qep[i * 4 + p] - qep[p]);
  2016. }
  2017. /*
  2018. g2[5], g3[4], g3[5],
  2019. r0[6:0], b3[0], b3[1], b2[4],
  2020. g0[6:0], b2[5], b3[2], g2[4],
  2021. b0[6:0], b3[3], b3[5], b3[4],
  2022. r1[5:0], g2[3:0],
  2023. g1[5:0], g3[3:0],
  2024. b1[5:0], b2[3:0],
  2025. r2[5:0],
  2026. r3[5:0]
  2027. */
  2028. uint32 pqep[8];
  2029. pqep[0] = pred_qep[0];
  2030. pqep[0] += bit_at(pred_qep[12 + 2], 0) << 7;
  2031. pqep[0] += bit_at(pred_qep[12 + 2], 1) << 8;
  2032. pqep[0] += bit_at(pred_qep[ 8 + 2], 4) << 9;
  2033. pqep[1] = pred_qep[1];
  2034. pqep[1] += bit_at(pred_qep[ 8 + 2], 5) << 7;
  2035. pqep[1] += bit_at(pred_qep[12 + 2], 2) << 8;
  2036. pqep[1] += bit_at(pred_qep[ 8 + 1], 4) << 9;
  2037. pqep[2] = pred_qep[2];
  2038. pqep[2] += bit_at(pred_qep[12 + 2], 3) << 7;
  2039. pqep[2] += bit_at(pred_qep[12 + 2], 5) << 8;
  2040. pqep[2] += bit_at(pred_qep[12 + 2], 4) << 9;
  2041. pqep[4] = pred_qep[4] + (pred_qep[ 8 + 1] & 15) * 64;
  2042. pqep[5] = pred_qep[5] + (pred_qep[12 + 1] & 15) * 64;
  2043. pqep[6] = pred_qep[6] + (pred_qep[ 8 + 2] & 15) * 64;
  2044. packed[0] = get_mode_prefix(1);
  2045. packed[0] += bit_at(pred_qep[ 8 + 1], 5) << 2;
  2046. packed[0] += bit_at(pred_qep[12 + 1], 4) << 3;
  2047. packed[0] += bit_at(pred_qep[12 + 1], 5) << 4;
  2048. packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
  2049. packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
  2050. packed[3] = (pred_qep[12] << 6) + pred_qep[8];
  2051. }
  2052. else if (mode == 2 || mode == 3 || mode == 4)
  2053. {
  2054. /*
  2055. r0[9:0], g0[9:0], b0[9:0],
  2056. r1[3:0], xx[y], xx[y], g2[3:0],
  2057. g1[3:0], xx[y], xx[y], g3[3:0],
  2058. b1[3:0], xx[y], xx[y], b2[3:0],
  2059. r2[3:0], xx[y], xx[y],
  2060. r3[3:0], xx[y], xx[y]
  2061. */
  2062. int dqep[16];
  2063. for (uniform int p = 0; p < 3; p++)
  2064. {
  2065. int mask = 15;
  2066. if (p == mode - 2) mask = 31;
  2067. dqep[p] = qep[p];
  2068. dqep[ 4 + p] = (qep[ 4 + p] - qep[p]) & mask;
  2069. dqep[ 8 + p] = (qep[ 8 + p] - qep[p]) & mask;
  2070. dqep[12 + p] = (qep[12 + p] - qep[p]) & mask;
  2071. }
  2072. for (uniform int i = 1; i < 4; i++)
  2073. for (uniform int p = 0; p < 3; p++)
  2074. {
  2075. int bits = 4;
  2076. if (p == mode - 2) bits = 5;
  2077. assert( qep[i * 4 + p] - qep[p] <= (1<<bits)/2 - 1);
  2078. assert(-(1<<bits)/2 <= qep[i * 4 + p] - qep[p]);
  2079. }
  2080. uint32 pqep[10];
  2081. pqep[0] = dqep[0] & 1023;
  2082. pqep[1] = dqep[1] & 1023;
  2083. pqep[2] = dqep[2] & 1023;
  2084. pqep[4] = dqep[4] + (dqep[ 8 + 1] & 15) * 64;
  2085. pqep[5] = dqep[5] + (dqep[12 + 1] & 15) * 64;
  2086. pqep[6] = dqep[6] + (dqep[ 8 + 2] & 15) * 64;
  2087. pqep[8] = dqep[8];
  2088. pqep[9] = dqep[12];
  2089. if (mode == 2)
  2090. {
  2091. /*
  2092. r0[9:0], g0[9:0], b0[9:0],
  2093. r1[3:0], r1[4], r0[10], g2[3:0],
  2094. g1[3:0], g0[10], b3[0], g3[3:0],
  2095. b1[3:0], b0[10], b3[1], b2[3:0],
  2096. r2[3:0], r2[4], b3[2],
  2097. r3[3:0], r3[4], b3[3]
  2098. */
  2099. packed[0] = get_mode_prefix(2);
  2100. //
  2101. pqep[5] += bit_at(dqep[0 + 1], 10) << 4;
  2102. pqep[6] += bit_at(dqep[0 + 2], 10) << 4;
  2103. //
  2104. //
  2105. pqep[4] += bit_at(dqep[0 + 0], 10) << 5;
  2106. pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
  2107. pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
  2108. pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
  2109. pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
  2110. }
  2111. if (mode == 3)
  2112. {
  2113. /*
  2114. r0[9:0], g0[9:0], b0[9:0],
  2115. r1[3:0], r0[10], g3[4], g2[3:0],
  2116. g1[3:0], g1[4], g0[10], g3[3:0],
  2117. b1[3:0], b0[10], b3[1], b2[3:0],
  2118. r2[3:0], b3[0], b3[2],
  2119. r3[3:0], g2[4], b3[3]
  2120. */
  2121. packed[0] = get_mode_prefix(3);
  2122. pqep[4] += bit_at(dqep[0 + 0], 10) << 4;
  2123. //
  2124. pqep[6] += bit_at(dqep[0 + 2], 10) << 4;
  2125. pqep[8] += bit_at(dqep[12 + 2], 0) << 4;
  2126. pqep[9] += bit_at(dqep[ 8 + 1], 4) << 4;
  2127. pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
  2128. pqep[5] += bit_at(dqep[0 + 1], 10) << 5;
  2129. pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
  2130. pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
  2131. pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
  2132. }
  2133. if (mode == 4)
  2134. {
  2135. /*
  2136. r0[9:0], g0[9:0], b0[9:0],
  2137. r1[3:0], r0[10], b2[4], g2[3:0],
  2138. g1[3:0], g0[10], b3[0], g3[3:0],
  2139. b1[3:0], b1[4], b0[10], b2[3:0],
  2140. r2[3:0], b3[1], b3[2],
  2141. r3[3:0], b3[4], b3[3]
  2142. */
  2143. packed[0] = get_mode_prefix(4);
  2144. pqep[4] += bit_at(dqep[0 + 0], 10) << 4;
  2145. pqep[5] += bit_at(dqep[0 + 1], 10) << 4;
  2146. //
  2147. pqep[8] += bit_at(dqep[12 + 2], 1) << 4;
  2148. pqep[9] += bit_at(dqep[12 + 2], 4) << 4;
  2149. pqep[4] += bit_at(dqep[ 8 + 2], 4) << 5;
  2150. pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
  2151. pqep[6] += bit_at(dqep[0 + 2], 10) << 5;
  2152. pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
  2153. pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
  2154. }
  2155. packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
  2156. packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
  2157. packed[3] = (pqep[9] << 6) + pqep[8];
  2158. }
  2159. else if (mode == 5)
  2160. {
  2161. int dqep[16];
  2162. for (uniform int p = 0; p < 3; p++)
  2163. {
  2164. dqep[p] = qep[p];
  2165. dqep[ 4 + p] = (qep[ 4 + p] - qep[p]) & 31;
  2166. dqep[ 8 + p] = (qep[ 8 + p] - qep[p]) & 31;
  2167. dqep[12 + p] = (qep[12 + p] - qep[p]) & 31;
  2168. }
  2169. for (uniform int i = 1; i < 4; i++)
  2170. for (uniform int p = 0; p < 3; p++)
  2171. {
  2172. assert( qep[i * 4 + p] - qep[p] <= 15);
  2173. assert(-16 <= qep[i * 4 + p] - qep[p]);
  2174. }
  2175. /*
  2176. r0[8:0], b2[4],
  2177. g0[8:0], g2[4],
  2178. b0[8:0], b3[4],
  2179. r1[4:0], g3[4], g2[3:0],
  2180. g1[4:0], b3[0], g3[3:0],
  2181. b1[4:0], b3[1], b2[3:0],
  2182. r2[4:0], b3[2],
  2183. r3[4:0], b3[3]
  2184. */
  2185. uint32 pqep[10];
  2186. pqep[0] = dqep[0];
  2187. pqep[1] = dqep[1];
  2188. pqep[2] = dqep[2];
  2189. pqep[4] = dqep[4] + (dqep[ 8 + 1] & 15) * 64;
  2190. pqep[5] = dqep[5] + (dqep[12 + 1] & 15) * 64;
  2191. pqep[6] = dqep[6] + (dqep[ 8 + 2] & 15) * 64;
  2192. pqep[8] = dqep[8];
  2193. pqep[9] = dqep[12];
  2194. pqep[0] += bit_at(dqep[ 8 + 2], 4) << 9;
  2195. pqep[1] += bit_at(dqep[ 8 + 1], 4) << 9;
  2196. pqep[2] += bit_at(dqep[12 + 2], 4) << 9;
  2197. pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
  2198. pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
  2199. pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
  2200. pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
  2201. pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
  2202. packed[0] = get_mode_prefix(5);
  2203. packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
  2204. packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
  2205. packed[3] = (pqep[9] << 6) + pqep[8];
  2206. }
  2207. else if (mode == 6 || mode == 7 || mode == 8)
  2208. {
  2209. /*
  2210. r0[7:0], xx[y], b2[4],
  2211. g0[7:0], xx[y], g2[4],
  2212. b0[7:0], xx[y], b3[4],
  2213. r1[4:0], xx[y], g2[3:0],
  2214. g1[4:0], xx[y], g3[3:0],
  2215. b1[4:0], xx[y], b2[3:0],
  2216. r2[4:0], xx[y],
  2217. r3[4:0], xx[y]
  2218. */
  2219. int dqep[16];
  2220. for (uniform int p = 0; p < 3; p++)
  2221. {
  2222. int mask = 31;
  2223. if (p == mode - 6) mask = 63;
  2224. dqep[p] = qep[p];
  2225. dqep[ 4 + p] = (qep[ 4 + p] - qep[p]) & mask;
  2226. dqep[ 8 + p] = (qep[ 8 + p] - qep[p]) & mask;
  2227. dqep[12 + p] = (qep[12 + p] - qep[p]) & mask;
  2228. }
  2229. for (uniform int i = 1; i < 4; i++)
  2230. for (uniform int p = 0; p < 3; p++)
  2231. {
  2232. int bits = 5;
  2233. if (p == mode - 6) bits = 6;
  2234. assert( qep[i * 4 + p] - qep[p] <= (1<<bits)/2 - 1);
  2235. assert(-(1<<bits)/2 <= qep[i * 4 + p] - qep[p]);
  2236. }
  2237. uint32 pqep[10];
  2238. pqep[0] = dqep[0];
  2239. pqep[0] += bit_at(dqep[ 8 + 2], 4) << 9;
  2240. pqep[1] = dqep[1];
  2241. pqep[1] += bit_at(dqep[ 8 + 1], 4) << 9;
  2242. pqep[2] = dqep[2];
  2243. pqep[2] += bit_at(dqep[12 + 2], 4) << 9;
  2244. pqep[4] = dqep[4] + (dqep[ 8 + 1] & 15) * 64;
  2245. pqep[5] = dqep[5] + (dqep[12 + 1] & 15) * 64;
  2246. pqep[6] = dqep[6] + (dqep[ 8 + 2] & 15) * 64;
  2247. pqep[8] = dqep[8];
  2248. pqep[9] = dqep[12];
  2249. if (mode == 6)
  2250. {
  2251. /*
  2252. r0[7:0], g3[4], b2[4],
  2253. g0[7:0], b3[2], g2[4],
  2254. b0[7:0], b3[3], b3[4],
  2255. r1[4:0], r1[5], g2[3:0],
  2256. g1[4:0], b3[0], g3[3:0],
  2257. b1[4:0], b3[1], b2[3:0],
  2258. r2[5:0],
  2259. r3[5:0]
  2260. */
  2261. packed[0] = get_mode_prefix(6);
  2262. pqep[0] += bit_at(dqep[12 + 1], 4) << 8;
  2263. pqep[1] += bit_at(dqep[12 + 2], 2) << 8;
  2264. pqep[2] += bit_at(dqep[12 + 2], 3) << 8;
  2265. //
  2266. pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
  2267. pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
  2268. //
  2269. //
  2270. }
  2271. if (mode == 7)
  2272. {
  2273. /*
  2274. r0[7:0], b3[0], b2[4],
  2275. g0[7:0], g2[5], g2[4],
  2276. b0[7:0], g3[5], b3[4],
  2277. r1[4:0], g3[4], g2[3:0],
  2278. g1[4:0], g1[5], g3[3:0],
  2279. b1[4:0], b3[1], b2[3:0],
  2280. r2[4:0], b3[2],
  2281. r3[4:0], b3[3]
  2282. */
  2283. packed[0] = get_mode_prefix(7);
  2284. pqep[0] += bit_at(dqep[12 + 2], 0) << 8;
  2285. pqep[1] += bit_at(dqep[ 8 + 1], 5) << 8;
  2286. pqep[2] += bit_at(dqep[12 + 1], 5) << 8;
  2287. pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
  2288. //
  2289. pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
  2290. pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
  2291. pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
  2292. }
  2293. if (mode == 8)
  2294. {
  2295. /*
  2296. r0[7:0], b3[1], b2[4],
  2297. g0[7:0], b2[5], g2[4],
  2298. b0[7:0], b3[5], b3[4],
  2299. r1[4:0], g3[4], g2[3:0],
  2300. g1[4:0], b3[0], g3[3:0],
  2301. b1[4:0], b1[5], b2[3:0],
  2302. r2[4:0], b3[2],
  2303. r3[4:0], b3[3]
  2304. */
  2305. packed[0] = get_mode_prefix(8);
  2306. pqep[0] += bit_at(dqep[12 + 2], 1) << 8;
  2307. pqep[1] += bit_at(dqep[ 8 + 2], 5) << 8;
  2308. pqep[2] += bit_at(dqep[12 + 2], 5) << 8;
  2309. pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
  2310. pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
  2311. //
  2312. pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
  2313. pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
  2314. }
  2315. packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
  2316. packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
  2317. packed[3] = (pqep[9] << 6) + pqep[8];
  2318. }
  2319. else if (mode == 9)
  2320. {
  2321. /*
  2322. r0[5:0], g3[4], b3[0], b3[1], b2[4], // 10
  2323. g0[5:0], g2[5], b2[5], b3[2], g2[4], // 10
  2324. b0[5:0], g3[5], b3[3], b3[5], b3[4], // 10
  2325. r1[5:0], g2[3:0], // 10
  2326. g1[5:0], g3[3:0], // 10
  2327. b1[5:0], b2[3:0], // 10
  2328. r2[5:0], // 6
  2329. r3[5:0] // 6
  2330. */
  2331. uint32 pqep[10];
  2332. pqep[0] = qep[0];
  2333. pqep[0] += bit_at(qep[12 + 1], 4) << 6;
  2334. pqep[0] += bit_at(qep[12 + 2], 0) << 7;
  2335. pqep[0] += bit_at(qep[12 + 2], 1) << 8;
  2336. pqep[0] += bit_at(qep[ 8 + 2], 4) << 9;
  2337. pqep[1] = qep[1];
  2338. pqep[1] += bit_at(qep[ 8 + 1], 5) << 6;
  2339. pqep[1] += bit_at(qep[ 8 + 2], 5) << 7;
  2340. pqep[1] += bit_at(qep[12 + 2], 2) << 8;
  2341. pqep[1] += bit_at(qep[ 8 + 1], 4) << 9;
  2342. pqep[2] = qep[2];
  2343. pqep[2] += bit_at(qep[12 + 1], 5) << 6;
  2344. pqep[2] += bit_at(qep[12 + 2], 3) << 7;
  2345. pqep[2] += bit_at(qep[12 + 2], 5) << 8;
  2346. pqep[2] += bit_at(qep[12 + 2], 4) << 9;
  2347. pqep[4] = qep[4] + (qep[ 8 + 1] & 15) * 64;
  2348. pqep[5] = qep[5] + (qep[12 + 1] & 15) * 64;
  2349. pqep[6] = qep[6] + (qep[ 8 + 2] & 15) * 64;
  2350. packed[0] = get_mode_prefix(9);
  2351. packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
  2352. packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
  2353. packed[3] = (qep[12] << 6) + qep[8];
  2354. }
  2355. else if (mode == 10)
  2356. {
  2357. // the only mode with nothing to do ~
  2358. packed[0] = get_mode_prefix(10);
  2359. packed[1] = (qep[2] << 20) + (qep[1] << 10) + qep[0];
  2360. packed[2] = (qep[6] << 20) + (qep[5] << 10) + qep[4];
  2361. }
  2362. else if (mode == 11)
  2363. {
  2364. int dqep[8];
  2365. for (uniform int p = 0; p < 3; p++)
  2366. {
  2367. dqep[p] = qep[p];
  2368. dqep[4 + p] = (qep[4 + p] - qep[p]) & 511;
  2369. }
  2370. for (uniform int i = 1; i < 2; i++)
  2371. for (uniform int p = 0; p < 3; p++)
  2372. {
  2373. assert( qep[i * 4 + p] - qep[p] <= 255);
  2374. assert(-256 <= qep[i * 4 + p] - qep[p]);
  2375. }
  2376. /*
  2377. r0[9:0], g0[9:0], b0[9:0],
  2378. r1[8:0], r0[10],
  2379. g1[8:0], g0[10],
  2380. b1[8:0], b0[10]
  2381. */
  2382. uint32 pqep[8];
  2383. pqep[0] = dqep[0] & 1023;
  2384. pqep[1] = dqep[1] & 1023;
  2385. pqep[2] = dqep[2] & 1023;
  2386. pqep[4] = dqep[4] + (dqep[0] >> 10) * 512;
  2387. pqep[5] = dqep[5] + (dqep[1] >> 10) * 512;
  2388. pqep[6] = dqep[6] + (dqep[2] >> 10) * 512;
  2389. packed[0] = get_mode_prefix(11);
  2390. packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
  2391. packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
  2392. }
  2393. else if (mode == 12)
  2394. {
  2395. int dqep[8];
  2396. for (uniform int p = 0; p < 3; p++)
  2397. {
  2398. dqep[p] = qep[p];
  2399. dqep[4 + p] = (qep[4 + p] - qep[p]) & 255;
  2400. }
  2401. for (uniform int i = 1; i < 2; i++)
  2402. for (uniform int p = 0; p < 3; p++)
  2403. {
  2404. assert( qep[i * 4 + p] - qep[p] <= 127);
  2405. assert(-128 <= qep[i * 4 + p] - qep[p]);
  2406. }
  2407. /*
  2408. r0[9:0], g0[9:0], b0[9:0],
  2409. r1[7:0], r0[10:11],
  2410. g1[7:0], g0[10:11],
  2411. b1[7:0], b0[10:11]
  2412. */
  2413. uint32 pqep[8];
  2414. pqep[0] = dqep[0] & 1023;
  2415. pqep[1] = dqep[1] & 1023;
  2416. pqep[2] = dqep[2] & 1023;
  2417. pqep[4] = dqep[4] + reverse_bits(dqep[0] >> 10, 2) * 256;
  2418. pqep[5] = dqep[5] + reverse_bits(dqep[1] >> 10, 2) * 256;
  2419. pqep[6] = dqep[6] + reverse_bits(dqep[2] >> 10, 2) * 256;
  2420. packed[0] = get_mode_prefix(12);
  2421. packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
  2422. packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
  2423. }
  2424. else if (mode == 13)
  2425. {
  2426. int dqep[8];
  2427. for (uniform int p = 0; p < 3; p++)
  2428. {
  2429. dqep[p] = qep[p];
  2430. dqep[4 + p] = (qep[4 + p] - qep[p]) & 15;
  2431. }
  2432. for (uniform int i = 1; i < 2; i++)
  2433. for (uniform int p = 0; p < 3; p++)
  2434. {
  2435. assert( qep[i * 4 + p] - qep[p] <= 7);
  2436. assert(-8 <= qep[i * 4 + p] - qep[p]);
  2437. }
  2438. /*
  2439. r0[9:0], g0[9:0], b0[9:0],
  2440. r1[3:0], r0[10:15],
  2441. g1[3:0], g0[10:15],
  2442. b1[3:0], b0[10:15]
  2443. */
  2444. uint32 pqep[8];
  2445. pqep[0] = dqep[0] & 1023;
  2446. pqep[1] = dqep[1] & 1023;
  2447. pqep[2] = dqep[2] & 1023;
  2448. pqep[4] = dqep[4] + reverse_bits(dqep[0] >> 10, 6) * 16;
  2449. pqep[5] = dqep[5] + reverse_bits(dqep[1] >> 10, 6) * 16;
  2450. pqep[6] = dqep[6] + reverse_bits(dqep[2] >> 10, 6) * 16;
  2451. packed[0] = get_mode_prefix(13);
  2452. packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
  2453. packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
  2454. }
  2455. else
  2456. {
  2457. assert(false);
  2458. }
  2459. }
  2460. void bc6h_code_2p(uint32 data[5], int qep[], uint32 qblock[2], int part_id, int mode)
  2461. {
  2462. uniform int bits = 3;
  2463. uniform int pairs = 2;
  2464. uniform int channels = 3;
  2465. int flips = bc7_code_apply_swap_mode01237(qep, qblock, 1, part_id);
  2466. for (uniform int k=0; k<5; k++) data[k] = 0;
  2467. uniform int pos = 0;
  2468. uint32 packed[4];
  2469. bc6h_pack(packed, qep, mode);
  2470. // mode
  2471. put_bits(data, &pos, 5, packed[0]);
  2472. // endpoints
  2473. put_bits(data, &pos, 30, packed[1]);
  2474. put_bits(data, &pos, 30, packed[2]);
  2475. put_bits(data, &pos, 12, packed[3]);
  2476. // partition
  2477. put_bits(data, &pos, 5, part_id);
  2478. // quantized values
  2479. bc7_code_qblock(data, &pos, qblock, bits, flips);
  2480. bc7_code_adjust_skip_mode01237(data, 1, part_id);
  2481. }
// Encode a single-partition BC6H block: apply the index-swap convention
// (see bc7_code_apply_swap_mode456, 4 index bits), pack the quantized
// endpoints for `mode`, then serialize mode, endpoints and the 4-bit
// per-pixel indices into `data`.
void bc6h_code_1p(uint32 data[5], int qep[8], uint32 qblock[2], int mode)
{
    bc7_code_apply_swap_mode456(qep, 4, qblock, 4);
    for (uniform int k = 0; k < 5; k++) data[k] = 0;
    uniform int pos = 0;
    uint32 packed[4];
    bc6h_pack(packed, qep, mode);
    // mode
    put_bits(data, &pos, 5, packed[0]);
    // endpoints (layout chosen by bc6h_pack; no packed[3] in 1-partition modes)
    put_bits(data, &pos, 30, packed[1]);
    put_bits(data, &pos, 30, packed[2]);
    // quantized values
    bc7_code_qblock(data, &pos, qblock, 4, 0);
}
  2497. //////////////////////////
  2498. // BC6H core
  2499. void bc6h_setup(bc6h_enc_state state[])
  2500. {
  2501. for (uniform int p = 0; p < 3; p++)
  2502. {
  2503. state->rgb_bounds[p ] = 0xFFFF;
  2504. state->rgb_bounds[3+p] = 0;
  2505. }
  2506. // uf16 conversion, min/max
  2507. for (uniform int p = 0; p < 3; p++)
  2508. for (uniform int k = 0; k < 16; k++)
  2509. {
  2510. state->block[p * 16 + k] = (state->block[p * 16 + k] / 31) * 64;
  2511. state->rgb_bounds[p ] = min(state->rgb_bounds[p ], state->block[p * 16 + k]);
  2512. state->rgb_bounds[3+p] = max(state->rgb_bounds[3+p], state->block[p * 16 + k]);
  2513. }
  2514. state->max_span = 0;
  2515. state->max_span_idx = 0;
  2516. float rgb_span[0] = { 0, 0, 0 };
  2517. for (uniform int p = 0; p < 3; p++)
  2518. {
  2519. rgb_span[p] = state->rgb_bounds[3+p] - state->rgb_bounds[p];
  2520. if (rgb_span[p] > state->max_span)
  2521. {
  2522. state->max_span_idx = p;
  2523. state->max_span = rgb_span[p];
  2524. }
  2525. }
  2526. }
// Run the BC6H mode search for one block, keeping the best encoding in
// state->best_data / state->best_err. slow_mode tests every supported mode;
// the fast path tests a pruned set. NOTE(review): the third/fourth
// bc6h_test_mode arguments appear to control refinement and an error-margin
// weight — confirm against bc6h_test_mode's definition (outside this chunk).
inline void CompressBlockBC6H_core(bc6h_enc_state state[])
{
    bc6h_setup(state);
    if (state->slow_mode)
    {
        // exhaustive pass over all modes, with refinement enabled
        bc6h_test_mode(state, 0, true, 0);
        bc6h_test_mode(state, 1, true, 0);
        bc6h_test_mode(state, 2, true, 0);
        bc6h_test_mode(state, 5, true, 0);
        bc6h_test_mode(state, 6, true, 0);
        bc6h_test_mode(state, 9, true, 0);
        bc6h_test_mode(state, 10, true, 0);
        bc6h_test_mode(state, 11, true, 0);
        bc6h_test_mode(state, 12, true, 0);
        bc6h_test_mode(state, 13, true, 0);
    }
    else
    {
        // two-partition modes are only considered when candidate filtering
        // is enabled (fastSkipTreshold > 0)
        if (state->fastSkipTreshold > 0)
        {
            bc6h_test_mode(state, 9, false, 0);
            if (state->fast_mode) bc6h_test_mode(state, 1, false, 1);
            bc6h_test_mode(state, 6, false, 1 / 1.2);
            bc6h_test_mode(state, 5, false, 1 / 1.2);
            bc6h_test_mode(state, 0, false, 1 / 1.2);
            bc6h_test_mode(state, 2, false, 1);
            bc6h_enc_2p(state);
            // mode 1 gets the full (refined) treatment unless in fast mode
            if (!state->fast_mode) bc6h_test_mode(state, 1, true, 0);
        }
        // single-partition modes
        bc6h_test_mode(state, 10, false, 0);
        bc6h_test_mode(state, 11, false, 1);
        bc6h_test_mode(state, 12, false, 1);
        bc6h_test_mode(state, 13, false, 1);
        bc6h_enc_1p(state);
    }
}
// Copy the user-facing BC6H encoder settings into the per-block state.
void bc6h_enc_copy_settings(bc6h_enc_state state[], uniform bc6h_enc_settings settings[])
{
    state->slow_mode = settings->slow_mode;
    state->fast_mode = settings->fast_mode;
    state->fastSkipTreshold = settings->fastSkipTreshold;
    state->refineIterations_1p = settings->refineIterations_1p;
    state->refineIterations_2p = settings->refineIterations_2p;
}
// Compress the 4x4 block at block coordinates (xx, yy) of `src` into `dst`.
inline void CompressBlockBC6H(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[], uniform bc6h_enc_settings settings[])
{
    bc6h_enc_state _state;
    // uniform pointer to per-lane (varying) state on the stack
    varying bc6h_enc_state* uniform state = &_state;
    bc6h_enc_copy_settings(state, settings);
    load_block_interleaved_16bit(state->block, src, xx, yy);
    state->best_err = 1e99; // sentinel: any real encoding beats this
    CompressBlockBC6H_core(state);
    store_data(dst, src->width, xx, yy, state->best_data, 4); // 4 dwords = 128-bit block
}
// Exported entry point: compress every 4x4 block of `src` to BC6H.
// Rows of blocks are iterated serially; blocks within a row are spread
// across SIMD lanes by foreach.
static export void CompressBlocksBC6H_ispc(uniform rgba_surface src[], uniform uint8 dst[], uniform bc6h_enc_settings settings[])
{
    for (uniform int yy = 0; yy < src->height / 4; yy++)
        foreach (xx = 0 ... src->width / 4)
        {
            CompressBlockBC6H(src, xx, yy, dst, settings);
        }
}
  2589. ///////////////////////////////////////////////////////////
  2590. // ETC encoding
// User-facing ETC1 encoder settings.
struct etc_enc_settings
{
    // number of candidate cluster splits evaluated in full after the coarse
    // sort in compress_etc1_half_7 (spelling matches the public API)
    int fastSkipTreshold;
};
// Per-block working state for the ETC1 encoder.
struct etc_enc_state
{
    // input pixels, planar: 16 values per channel at block[p*16 + k]
    // (only p = 0..2 is read here; remaining 16 floats presumably alpha — unused)
    float block[64];
    int prev_qcenter[3];   // previous half-block's quantized base color; [0] < 0 means "none yet"
    float best_err;        // lowest squared error found so far
    uint32 best_data[2];   // packed 64-bit ETC1 block corresponding to best_err
    uniform bool diff;     // true: differential (5+3 bit) mode, false: individual 4-bit mode
    // settings
    uniform int fastSkipTreshold;
};
// ETC1 luminance modifier lookup: returns entry `q` (0..3) of codeword
// `table` (0..7). Each row is sorted ascending: -large, -small, +small, +large.
inline uniform int get_etc1_dY(uniform int table, uniform int q)
{
    static uniform const int etc_codeword_table[8][4] =
    {
        {   -8,  -2,  2,   8 },
        {  -17,  -5,  5,  17 },
        {  -29,  -9,  9,  29 },
        {  -42, -13, 13,  42 },
        {  -60, -18, 18,  60 },
        {  -80, -24, 24,  80 },
        { -106, -33, 33, 106 },
        { -183, -47, 47, 183 },
    };
    return etc_codeword_table[table][q];
}
// Maps a 2-bit ETC1 pixel selector value to its column in etc_codeword_table
// (used as get_etc1_dY(table, remap_q[q]); inverse mapping is get_remap2_q).
uniform int remap_q[] = { 2, 3, 1, 0 };
  2621. int get_remap2_q(int x)
  2622. {
  2623. x -= 2;
  2624. if (x < 0) x = 1 - x;
  2625. return x;
  2626. }
  2627. int extend_4to8bits(int value)
  2628. {
  2629. return (value << 4) | value;
  2630. }
  2631. int extend_5to8bits(int value)
  2632. {
  2633. return (value << 3) | (value >> 2);
  2634. }
// Quantize a float in the 0..255 range to the nearest 4-bit code (0..15),
// clamped. (clamp is an ISPC builtin; expression order kept as-is to
// preserve float rounding.)
int quantize_4bits(float value)
{
    return clamp((value / 255.0f) * 15 + 0.5, 0, 15);
}
// Quantize a float in the 0..255 range to the nearest 5-bit code (0..31),
// clamped.
int quantize_5bits(float value)
{
    return clamp((value / 255.0f) * 31 + 0.5, 0, 31);
}
// Quantize a half-block base color and dequantize it back into `center`
// (so subsequent error computation uses representable values).
// Differential mode: 5 bits/channel; if a previous half's base exists
// (prev_qcenter[0] >= 0), each channel is clamped to within [-4, +3] of it
// so the delta fits ETC1's 3-bit two's-complement field.
// Individual mode: plain 4 bits/channel.
void center_quant_dequant(int qcenter[3], float center[3], uniform bool diff, int prev_qcenter[3])
{
    if (diff)
    {
        for (uniform int p = 0; p < 3; p++)
        {
            qcenter[p] = quantize_5bits(center[p]);
            if (prev_qcenter[0] >= 0)
            {
                if (qcenter[p] - prev_qcenter[p] > 3) qcenter[p] = prev_qcenter[p] + 3;
                if (qcenter[p] - prev_qcenter[p] < -4) qcenter[p] = prev_qcenter[p] - 4;
            }
            center[p] = extend_5to8bits(qcenter[p]);
        }
    }
    else
    {
        for (uniform int p = 0; p < 3; p++)
        {
            qcenter[p] = quantize_4bits(center[p]);
            center[p] = extend_4to8bits(qcenter[p]);
        }
    }
}
// For each pixel of a 4x2 half-block pick the modifier of `table` (via the
// selector->column map remap_q) that best matches the pixel, writing the
// 2-bit selectors into qblock[0] — LSB plane in bits 0..15, MSB plane in
// bits 16..31, indexed column-major as (x*4 + y) — and returning the total
// squared RGB error.
float quantize_pixels_etc1_half(uint32 qblock[1], float block[48], float center[3], uniform int table)
{
    float total_err = 0;
    uint32 bits = 0;
    for (uniform int y = 0; y < 2; y++)
        for (uniform int x = 0; x < 4; x++)
        {
            float best_err = sq(255) * 3; // worse than any achievable pixel error
            int best_q = -1;
            for (uniform int q = 0; q < 4; q++)
            {
                int dY = get_etc1_dY(table, remap_q[q]);
                float err = 0;
                for (int p = 0; p < 3; p++)
                    err += sq(block[16 * p + y * 4 + x] - clamp(center[p] + dY, 0, 255));
                if (err < best_err)
                {
                    best_err = err;
                    best_q = q;
                }
            }
            assert(best_q >= 0);
            // split the 2-bit selector into the two bit planes
            bits |= (best_q & 1) << (x * 4 + y);
            bits |= (best_q >> 1) << (x * 4 + y + 16);
            total_err += best_err;
        }
    qblock[0] = bits;
    return total_err;
}
// Simple half-block encoder: estimate the base color as the channel mean
// shifted down by the table's q=2 (small positive) modifier, then try all 8
// modifier tables, keeping the best (qbits, table, base color) triple.
// Returns the best squared error.
float compress_etc1_half_1(uint32 out_qbits[1], int out_table[1], int out_qcenter[3],
                           float half_pixels[], uniform bool diff, int prev_qcenter[3])
{
    // per-channel sums over the 8 pixels
    float dc[3];
    for (uniform int p = 0; p < 3; p++) dc[p] = 0;
    for (uniform int k = 0; k < 8; k++)
    {
        for (uniform int p = 0; p < 3; p++)
            dc[p] += half_pixels[k + p * 16];
    }
    float best_error = sq(255) * 3 * 8.0f; // worse than any achievable half-block error
    int best_table = -1;
    int best_qcenter[3];
    uint32 best_qbits;
    for (uniform int table_level = 0; table_level < 8; table_level++)
    {
        float center[3];
        int qcenter[3];
        uint32 qbits;
        // mean minus the small positive modifier as the base-color estimate
        for (uniform int p = 0; p < 3; p++) center[p] = dc[p] / 8 - get_etc1_dY(table_level, 2);
        center_quant_dequant(qcenter, center, diff, prev_qcenter);
        float err = quantize_pixels_etc1_half(&qbits, half_pixels, center, table_level);
        if (err < best_error)
        {
            best_error = err;
            best_table = table_level;
            best_qbits = qbits;
            for (uniform int p = 0; p < 3; p++) best_qcenter[p] = qcenter[p];
        }
    }
    out_table[0] = best_table;
    out_qbits[0] = best_qbits;
    for (uniform int p = 0; p < 3; p++) out_qcenter[p] = best_qcenter[p];
    return best_error;
}
// Find a base-color value for channel `p` that minimizes the clamped squared
// error under modifier table `table_level`, given per-cluster statistics in
// `colors` (colors[q][3] = pixel count, colors[q][7+p] = cluster mean).
// Starts from the count-weighted least-squares center, then tries four
// "branch" variants that exclude clusters at either end of the modifier
// range (whose reconstructions may clamp at 0/255), keeping the best.
float optimize_center(float colors[4][10], uniform int p, uniform int table_level)
{
    // unconstrained weighted least-squares center over all clusters
    float best_center = 0;
    for (uniform int q = 0; q < 4; q++)
    {
        best_center += (colors[q][7 + p] - get_etc1_dY(table_level, q)) * colors[q][3];
    }
    best_center /= 8;
    float best_err = 0;
    for (uniform int q = 0; q < 4; q++)
    {
        float dY = get_etc1_dY(table_level, q);
        best_err += sq(clamp(best_center + dY, 0, 255) - colors[q][7 + p]) * colors[q][3];
    }
    for (uniform int branch = 0; branch < 4; branch++)
    {
        // branch 0/1 exclude the lowest cluster(s); branch 2/3 the highest
        float new_center = 0;
        float sum = 0;
        for (uniform int q = 0; q < 4; q++)
        {
            if (branch <= 1 && q <= branch) continue;
            if (branch >= 2 && q >= branch) continue;
            new_center += (colors[q][7 + p] - get_etc1_dY(table_level, q)) * colors[q][3];
            sum += colors[q][3];
        }
        new_center /= sum;
        // re-score the candidate over ALL clusters, with clamping
        float err = 0;
        for (uniform int q = 0; q < 4; q++)
        {
            float dY = get_etc1_dY(table_level, q);
            err += sq(clamp(new_center + dY, 0, 255) - colors[q][7 + p]) * colors[q][3];
        }
        if (err < best_err)
        {
            best_err = err;
            best_center = new_center;
        }
    }
    return best_center;
}
// Main ETC1 half-block (4x2) search. Sorts the 8 pixels by the sum of their
// RGB channels (a luminance proxy), enumerates all 165 ways to split the
// sorted run into 4 ordered clusters, scores every split cheaply against
// all 8 modifier tables, keeps the `fastSkipTreshold` most promising
// splits, and finally evaluates the survivors fully per channel (center
// optimization + quantization). Returns the best squared error found.
float compress_etc1_half_7(uint32 out_qbits[1], int out_table[1], int out_qcenter[3],
                           float half_pixels[], etc_enc_state state[])
{
    int err_list[165]; // one entry per (level1, level2, level3) split
    int y_sorted_inv[8];
    float y_sorted[8];
    {
        // sort keys: (3*luma) << 4 | pixel index, so the low nibble carries
        // the original position through the sort
        int y_sorted_idx[8];
        for (uniform int k = 0; k < 8; k++)
        {
            float value = 0;
            for (uniform int p = 0; p < 3; p++)
                value += half_pixels[k + p * 16];
            y_sorted_idx[k] = (((int)value) << 4) + k;
        }
        partial_sort_list(y_sorted_idx, 8, 8);
        // build the inverse permutation: original index -> sorted rank
        for (uniform int k = 0; k < 8; k++)
            y_sorted_inv[k] = ((y_sorted_idx[k] & 0xF) << 4) + k;
        for (uniform int k = 0; k < 8; k++)
            y_sorted[k] = (y_sorted_idx[k] >> 4) / 3.0f; // mean channel value
        partial_sort_list(y_sorted_inv, 8, 8);
    }
    // Phase 1: cheap luma-only scoring of every ordered 4-way split.
    uniform int idx = -1;
    for (uniform int level1 = 0; level1 <= 8; level1++)
        for (uniform int level2 = level1; level2 <= 8; level2++)
            for (uniform int level3 = level2; level3 <= 8; level3++)
            {
                idx++;
                assert(idx < 165);
                float sum[4];
                float sum_sq[4];
                float count[4];
                float inv_count[4];
                for (uniform int q = 0; q < 4; q++)
                {
                    sum[q] = 0;
                    sum_sq[q] = 0;
                    count[q] = 0;
                    inv_count[q] = 0;
                }
                // accumulate per-cluster luma statistics
                for (uniform int k = 0; k < 8; k++)
                {
                    uniform int q = 0;
                    if (k >= level1) q = 1;
                    if (k >= level2) q = 2;
                    if (k >= level3) q = 3;
                    sum[q] += y_sorted[k];
                    sum_sq[q] += sq(y_sorted[k]);
                    count[q] += 1;
                }
                for (uniform int q = 0; q < 4; q++)
                {
                    if (count[q] > 0) inv_count[q] = 1 / count[q];
                }
                // within-cluster variance — table-independent error floor
                float base_err = 0;
                for (uniform int q = 0; q < 4; q++) base_err += sum_sq[q] - sq(sum[q]) * inv_count[q];
                float t_err = sq(256) * 8; // worse than any achievable split error
                for (uniform int table_level = 0; table_level < 8; table_level++)
                {
                    // least-squares base luma for this table
                    float center = 0;
                    for (uniform int q = 0; q < 4; q++) center += sum[q] - get_etc1_dY(table_level, q) * count[q];
                    center /= 8;
                    float err = base_err;
                    for (uniform int q = 0; q < 4; q++)
                    {
                        err += sq(center + get_etc1_dY(table_level, q) - sum[q] * inv_count[q]) * count[q];
                    }
                    t_err = min(t_err, err);
                }
                // key: error in the high bits, split levels in the low 12
                int packed = (level1 * 16 + level2) * 16 + level3;
                err_list[idx] = (((int)t_err) << 12) + packed;
            }
    // keep the fastSkipTreshold lowest-error splits at the front
    partial_sort_list(err_list, 165, state->fastSkipTreshold);
    // Phase 2: full RGB evaluation of the surviving splits.
    float best_error = sq(255) * 3 * 8.0f;
    int best_table = -1;
    int best_qcenter[3];
    uint32 best_qbits;
    for (uniform int i = 0; i < state->fastSkipTreshold; i++)
    {
        int packed = err_list[i] & 0xFFF;
        int level1 = (packed >> 8) & 0xF;
        int level2 = (packed >> 4) & 0xF;
        int level3 = (packed >> 0) & 0xF;
        // colors[q][0..2]: channel sums, [3]: count, [4..6]: channel sum of
        // squares, [7..9]: channel means (filled below)
        float colors[4][10];
        for (uniform int p = 0; p < 7; p++)
            for (uniform int q = 0; q < 4; q++) colors[q][p] = 0;
        uint32 qbits = 0;
        for (uniform int kk = 0; kk < 8; kk++)
        {
            int k = y_sorted_inv[kk] & 0xF; // sorted rank of pixel kk
            int qq = 0;
            if (k >= level1) qq = 1;
            if (k >= level2) qq = 2;
            if (k >= level3) qq = 3;
            uniform int xx = kk & 3;
            uniform int yy = kk >> 2;
            // convert cluster index to the ETC1 selector encoding
            int qqq = get_remap2_q(qq);
            qbits |= (qqq & 1) << (yy + xx * 4);
            qbits |= (qqq >> 1) << (16 + yy + xx * 4);
            // one-hot accumulate this pixel into its cluster's statistics
            float qvec[4];
            for (uniform int q = 0; q < 4; q++)
            {
                qvec[q] = q == qq ? 1.0 : 0.0;
                colors[q][3] += qvec[q];
            }
            for (uniform int p = 0; p < 3; p++)
            {
                float value = half_pixels[16 * p + kk];
                for (uniform int q = 0; q < 4; q++)
                {
                    colors[q][p] += value * qvec[q];
                    colors[q][4 + p] += sq(value) * qvec[q];
                }
            }
        }
        // within-cluster RGB variance — table-independent error floor
        float base_err = 0;
        for (uniform int q = 0; q < 4; q++)
        {
            if (colors[q][3] > 0)
                for (uniform int p = 0; p < 3; p++)
                {
                    colors[q][7 + p] = colors[q][p] / colors[q][3];
                    base_err += colors[q][4 + p] - sq(colors[q][7 + p]) * colors[q][3];
                }
        }
        for (uniform int table_level = 0; table_level < 8; table_level++)
        {
            float center[3];
            int qcenter[3];
            for (uniform int p = 0; p < 3; p++)
            {
                center[p] = optimize_center(colors, p, table_level);
            }
            center_quant_dequant(qcenter, center, state->diff, state->prev_qcenter);
            float err = base_err;
            for (uniform int q = 0; q < 4; q++)
            {
                int dY = get_etc1_dY(table_level, q);
                for (uniform int p = 0; p < 3; p++)
                    err += sq(clamp(center[p] + dY, 0, 255) - colors[q][7 + p]) * colors[q][3];
            }
            if (err < best_error)
            {
                best_error = err;
                best_table = table_level;
                best_qbits = qbits;
                for (uniform int p = 0; p < 3; p++) best_qcenter[p] = qcenter[p];
            }
        }
    }
    out_table[0] = best_table;
    out_qbits[0] = best_qbits;
    for (uniform int p = 0; p < 3; p++) out_qcenter[p] = best_qcenter[p];
    return best_error;
}
// Encode one half-block and record its chosen base color in prev_qcenter so
// the next half can honor the differential-mode delta constraint.
float compress_etc1_half(uint32 qbits[1], int table[1], int qcenter[3], float half_pixels[], etc_enc_state state[])
{
    float err = compress_etc1_half_7(qbits, table, qcenter, half_pixels, state);
    for (uniform int p = 0; p < 3; p++)
        state->prev_qcenter[p] = qcenter[p];
    return err;
}
  2933. //////////////////////////
  2934. // ETC1 core
  2935. inline uint32 bswap32(uint32 v)
  2936. {
  2937. uint32 r = 0;
  2938. r += ((v >> 24) & 255) << 0;
  2939. r += ((v >> 16) & 255) << 8;
  2940. r += ((v >> 8) & 255) << 16;
  2941. r += ((v >> 0) & 255) << 24;
  2942. return r;
  2943. }
// Serialize one ETC1 block (two encoded half-blocks) into 64 bits:
// data[0] holds base colors, flip/diff flags and table codes; data[1] holds
// the byte-swapped per-pixel 2-bit selectors.
void etc_pack(uint32 data[], uint32 qbits[2], int tables[2], int qcenters[2][3], uniform int diff, uniform int flip)
{
    for (uniform int k = 0; k < 2; k++) data[k] = 0;
    uniform int pos = 0;
    if (diff == 0)
    {
        // individual mode: two 4-bit base colors per channel
        // (second half's nibble is written before the first's)
        put_bits(data, &pos, 4, qcenters[1][0]);
        put_bits(data, &pos, 4, qcenters[0][0]);
        put_bits(data, &pos, 4, qcenters[1][1]);
        put_bits(data, &pos, 4, qcenters[0][1]);
        put_bits(data, &pos, 4, qcenters[1][2]);
        put_bits(data, &pos, 4, qcenters[0][2]);
    }
    else
    {
        // differential mode: 3-bit two's-complement delta + 5-bit base per
        // channel (center_quant_dequant guarantees the delta fits)
        put_bits(data, &pos, 3, (qcenters[1][0] - qcenters[0][0]) & 7);
        put_bits(data, &pos, 5, qcenters[0][0]);
        put_bits(data, &pos, 3, (qcenters[1][1] - qcenters[0][1]) & 7);
        put_bits(data, &pos, 5, qcenters[0][1]);
        put_bits(data, &pos, 3, (qcenters[1][2] - qcenters[0][2]) & 7);
        put_bits(data, &pos, 5, qcenters[0][2]);
    }
    put_bits(data, &pos, 1, flip);
    put_bits(data, &pos, 1, diff);
    put_bits(data, &pos, 3, tables[1]);
    put_bits(data, &pos, 3, tables[0]);
    // Selectors were produced in column-major (x*4+y) order with each half
    // occupying alternating bit pairs once merged (second half shifted by 2).
    uint32 all_qbits_flipped = (qbits[1] << 2) | qbits[0];
    uint32 all_qbits = 0;
    if (flip != 0) all_qbits = all_qbits_flipped;
    // For flip == 0 the halves were encoded from transposed pixel data, so
    // transpose each 16-bit selector plane back to row-major here.
    if (flip == 0)
        for (uniform int k = 0; k < 2; k++)
            for (uniform int y = 0; y < 4; y++)
                for (uniform int x = 0; x < 4; x++)
                {
                    int bit = (all_qbits_flipped >> (k * 16 + x * 4 + y)) & 1;
                    all_qbits += bit << (k * 16 + y * 4 + x);
                }
    data[1] = bswap32(all_qbits); // ETC1 stores selector bytes big-endian
}
// Try all four flip x diff combinations for the block and keep the cheapest
// encoding. A transposed copy of the pixels lets both orientations be
// encoded as two contiguous 8-pixel halves.
inline void CompressBlockETC1_core(etc_enc_state state[])
{
    float flipped_block[48];
    for (uniform int y = 0; y < 4; y++)
        for (uniform int x = 0; x < 4; x++)
            for (uniform int p = 0; p < 3; p++)
            {
                flipped_block[16 * p + x * 4 + y] = state->block[16 * p + y * 4 + x];
            }
    // differential mode tried first (diff = 1, then 0)
    for (uniform int flip = 0; flip < 2; flip++)
        for (uniform int diff = 1; diff >= 0; diff--)
        {
            state->diff = diff == 1;
            state->prev_qcenter[0] = -1; // "no previous half yet" sentinel
            // flip == 0 encodes from transposed data (etc_pack undoes this)
            varying float * uniform pixels = state->block;
            if (flip == 0) pixels = flipped_block;
            uint32 qbits[2];
            int tables[2];
            int qcenters[2][3];
            float err = 0;
            err += compress_etc1_half(&qbits[0], &tables[0], qcenters[0], &pixels[0], state);
            err += compress_etc1_half(&qbits[1], &tables[1], qcenters[1], &pixels[8], state);
            if (err < state->best_err)
            {
                state->best_err = err;
                etc_pack(state->best_data, qbits, tables, qcenters, diff, flip);
            }
        }
}
// Copy the user-facing ETC1 encoder settings into the per-block state.
void etc_enc_copy_settings(etc_enc_state state[], uniform etc_enc_settings settings[])
{
    state->fastSkipTreshold = settings->fastSkipTreshold;
}
// Compress the 4x4 block at block coordinates (xx, yy) of `src` into `dst`.
inline void CompressBlockETC1(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[], uniform etc_enc_settings settings[])
{
    etc_enc_state _state;
    // uniform pointer to per-lane (varying) state on the stack
    varying etc_enc_state* uniform state = &_state;
    etc_enc_copy_settings(state, settings);
    load_block_interleaved(state->block, src, xx, yy);
    state->best_err = 1e99; // sentinel: any real encoding beats this
    CompressBlockETC1_core(state);
    store_data(dst, src->width, xx, yy, state->best_data, 2); // 2 dwords = 64-bit block
}
// Exported entry point: compress every 4x4 block of `src` to ETC1.
// Rows of blocks are iterated serially; blocks within a row are spread
// across SIMD lanes by foreach.
static export void CompressBlocksETC1_ispc(uniform rgba_surface src[], uniform uint8 dst[], uniform etc_enc_settings settings[])
{
    for (uniform int yy = 0; yy < src->height / 4; yy++)
        foreach (xx = 0 ... src->width / 4)
        {
            CompressBlockETC1(src, xx, yy, dst, settings);
        }
}