kernel.ispc.cpp
/*
Copyright (c) 2013, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

// Map ISPC-specific types and keywords to plain C++, so this ISPC kernel
// compiles as regular C++ (U8/U16/I32/U32/U64 are assumed to be typedefs
// provided by the including project).
typedef U8  uint8;
typedef U16 uint16;
typedef I32 int32;
typedef U32 uint32;
typedef U64 uint64;
#define uniform
#define varying
#define export

namespace ispc
{

inline float abs  (float x) {return fabsf(x);}
inline float sqrt (float x) {return sqrtf(x);}
inline float rsqrt(float x) {return 1.0f/sqrtf(x);}
inline float rcp  (float x) {return 1.0f/x;}
inline float min  (float a, float b) {return (a<=b) ? a : b;}
inline float max  (float a, float b) {return (a>=b) ? a : b;}
inline float clamp(float v, float a, float b)
{
    return (v<=a) ? a : (v>=b) ? b : v;
}
///////////////////////////
// generic helpers

inline void swap_ints(int u[], int v[], uniform int n)
{
    for (uniform int i=0; i<n; i++)
    {
        int t = u[i];
        u[i] = v[i];
        v[i] = t;
    }
}

inline void swap_uints(uint32 u[], uint32 v[], uniform int n)
{
    for (uniform int i=0; i<n; i++)
    {
        uint32 t = u[i];
        u[i] = v[i];
        v[i] = t;
    }
}

inline float sq(float v)
{
    return v*v;
}

inline int pow2(int x)
{
    return 1<<x;
}

inline float clamp(float v, int a, int b)
{
    return (v<=a) ? a : (v>=b) ? b : v;
}

// the following helpers isolate performance warnings
inline uint32 gather_uint(const uniform uint32* const uniform ptr, int idx)
{
    return ptr[idx]; // (perf warning expected)
}

/*inline uint32 gather_uint(const varying uint32* const uniform ptr, int idx)
{
    return ptr[idx]; // (perf warning expected)
}*/

inline int32 gather_int(const uniform int32* const uniform ptr, int idx)
{
    return ptr[idx]; // (perf warning expected)
}

inline float gather_float(varying float* uniform ptr, int idx)
{
    return ptr[idx]; // (perf warning expected)
}

inline void scatter_uint(uniform uint32* ptr, int idx, uint32 value)
{
    ptr[idx] = value; // (perf warning expected)
}

inline void scatter_int(varying int32* uniform ptr, int idx, uint32 value)
{
    ptr[idx] = value; // (perf warning expected)
}

inline uint32 shift_right(uint32 v, const uniform int bits)
{
    return v>>bits; // (perf warning expected)
}

///////////////////////////////////////////////////////////
// BC1/BC7 shared

/*struct rgba_surface
{
    uint8* ptr;
    int width, height, stride;
};*/

inline void load_block_interleaved(float block[48], uniform rgba_surface* uniform src, int xx, uniform int yy)
{
    for (uniform int y = 0; y<4; y++)
    for (uniform int x = 0; x<4; x++)
    {
        uniform uint32* uniform src_ptr = (uint32*)&src->ptr[(yy*4+y)*src->stride];
        uint32 rgba = gather_uint(src_ptr, xx*4+x);
        block[16*0+y*4+x] = (int)((rgba>> 0)&255);
        block[16*1+y*4+x] = (int)((rgba>> 8)&255);
        block[16*2+y*4+x] = (int)((rgba>>16)&255);
    }
}

inline void load_block_interleaved_rgba(float block[64], uniform rgba_surface* uniform src, int xx, uniform int yy)
{
    for (uniform int y=0; y<4; y++)
    for (uniform int x=0; x<4; x++)
    {
        uniform uint32* uniform src_ptr = (uint32*)&src->ptr[(yy*4+y)*src->stride];
        uint32 rgba = gather_uint(src_ptr, xx*4+x);
        block[16*0+y*4+x] = (int)((rgba>> 0)&255);
        block[16*1+y*4+x] = (int)((rgba>> 8)&255);
        block[16*2+y*4+x] = (int)((rgba>>16)&255);
        block[16*3+y*4+x] = (int)((rgba>>24)&255);
    }
}

inline void load_block_interleaved_16bit(float block[48], uniform rgba_surface* uniform src, int xx, uniform int yy)
{
    for (uniform int y = 0; y<4; y++)
    for (uniform int x = 0; x<4; x++)
    {
        uniform uint32* uniform src_ptr_r = (uint32*)&src->ptr[(yy*4+y)*src->stride + 0];
        uniform uint32* uniform src_ptr_g = (uint32*)&src->ptr[(yy*4+y)*src->stride + 2];
        uniform uint32* uniform src_ptr_b = (uint32*)&src->ptr[(yy*4+y)*src->stride + 4];
        uint32 xr = gather_uint(src_ptr_r, (xx*4+x)*2);
        uint32 xg = gather_uint(src_ptr_g, (xx*4+x)*2);
        uint32 xb = gather_uint(src_ptr_b, (xx*4+x)*2);
        block[16*0+y*4+x] = (int)(xr&0xFFFF);
        block[16*1+y*4+x] = (int)(xg&0xFFFF);
        block[16*2+y*4+x] = (int)(xb&0xFFFF);
        block[16*3+y*4+x] = 0;
    }
}

inline void store_data(uniform uint8 dst[], int width, int xx, uniform int yy, uint32 data[], int data_size)
{
    for (uniform int k=0; k<data_size; k++)
    {
        uniform uint32* dst_ptr = (uint32*)&dst[(yy)*width*data_size];
        scatter_uint(dst_ptr, xx*data_size+k, data[k]);
    }
}

inline void ssymv(float a[3], float covar[6], float b[3])
{
    a[0] = covar[0]*b[0]+covar[1]*b[1]+covar[2]*b[2];
    a[1] = covar[1]*b[0]+covar[3]*b[1]+covar[4]*b[2];
    a[2] = covar[2]*b[0]+covar[4]*b[1]+covar[5]*b[2];
}

inline void ssymv3(float a[4], float covar[10], float b[4])
{
    a[0] = covar[0]*b[0]+covar[1]*b[1]+covar[2]*b[2];
    a[1] = covar[1]*b[0]+covar[4]*b[1]+covar[5]*b[2];
    a[2] = covar[2]*b[0]+covar[5]*b[1]+covar[7]*b[2];
}

inline void ssymv4(float a[4], float covar[10], float b[4])
{
    a[0] = covar[0]*b[0]+covar[1]*b[1]+covar[2]*b[2]+covar[3]*b[3];
    a[1] = covar[1]*b[0]+covar[4]*b[1]+covar[5]*b[2]+covar[6]*b[3];
    a[2] = covar[2]*b[0]+covar[5]*b[1]+covar[7]*b[2]+covar[8]*b[3];
    a[3] = covar[3]*b[0]+covar[6]*b[1]+covar[8]*b[2]+covar[9]*b[3];
}
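
// compute_axis3/compute_axis estimate the principal (PCA) axis of the
// covariance matrix by power iteration: repeatedly multiplying a start
// vector by the matrix converges toward the eigenvector with the largest
// eigenvalue; renormalizing every other step keeps the values in range.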
inline void compute_axis3(float axis[3], float covar[6], uniform const int powerIterations)
{
    float vec[3] = {1,1,1};
    for (uniform int i=0; i<powerIterations; i++)
    {
        ssymv(axis, covar, vec);
        for (uniform int p=0; p<3; p++) vec[p] = axis[p];
        if (i%2==1) // renormalize every other iteration
        {
            float norm_sq = 0;
            for (uniform int p=0; p<3; p++)
                norm_sq += axis[p]*axis[p];
            float rnorm = rsqrt(norm_sq);
            for (uniform int p=0; p<3; p++) vec[p] *= rnorm;
        }
    }
    for (uniform int p=0; p<3; p++) axis[p] = vec[p];
}

inline void compute_axis(float axis[4], float covar[10], uniform const int powerIterations, uniform int channels)
{
    float vec[4] = {1,1,1,1};
    for (uniform int i=0; i<powerIterations; i++)
    {
        if (channels == 3) ssymv3(axis, covar, vec);
        if (channels == 4) ssymv4(axis, covar, vec);
        for (uniform int p=0; p<channels; p++) vec[p] = axis[p];
        if (i%2==1) // renormalize every other iteration
        {
            float norm_sq = 0;
            for (uniform int p=0; p<channels; p++)
                norm_sq += axis[p]*axis[p];
            float rnorm = rsqrt(norm_sq);
            for (uniform int p=0; p<channels; p++) vec[p] *= rnorm;
        }
    }
    for (uniform int p=0; p<channels; p++) axis[p] = vec[p];
}

///////////////////////////////////////////////////////////
// BC1/BC3 encoding

inline int stb__Mul8Bit(int a, int b)
{
    int t = a*b + 128;
    return (t + (t >> 8)) >> 8;
}
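
// stb__Mul8Bit computes round(a*b/255) without a division: with
// t = a*b+128, (t + (t>>8)) >> 8 is exact for 8-bit inputs, e.g.
// a=b=255 gives t=65153, (65153+254)>>8 = 255.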
inline uint16 stb__As16Bit(int r, int g, int b)
{
    return (stb__Mul8Bit(r,31) << 11) + (stb__Mul8Bit(g,63) << 5) + stb__Mul8Bit(b,31);
}

inline uint16 enc_rgb565(float c[3])
{
    return stb__As16Bit((int)c[0], (int)c[1], (int)c[2]);
}

inline void dec_rgb565(float c[3], int p)
{
    int c2 = (p>>0)&31;
    int c1 = (p>>5)&63;
    int c0 = (p>>11)&31;
    c[0] = (c0<<3)+(c0>>2);
    c[1] = (c1<<2)+(c1>>4);
    c[2] = (c2<<3)+(c2>>2);
}
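
// Decoding replicates the top bits into the low bits ((c<<3)+(c>>2) for
// 5-bit, (c<<2)+(c>>4) for 6-bit channels), which maps the quantized
// range back onto 0..255 exactly at both ends (31 -> 255, 63 -> 255).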
inline void pick_endpoints_dc(int c0[3], int c1[3], int block[48], int iaxis[3])
{
    for (uniform int p=0; p<3; p++)
    for (uniform int y=0; y<4; y++)
    for (uniform int x=0; x<4; x++)
    {
        c0[p] += block[p*16+y*4+x];
    }
    for (uniform int p=0; p<3; p++)
        c0[p] >>= 4;
}

inline void pick_endpoints(float c0[3], float c1[3], float block[48], float axis[3], float dc[3])
{
    float min_dot = 256*256;
    float max_dot = 0;
    for (uniform int y=0; y<4; y++)
    for (uniform int x=0; x<4; x++)
    {
        float dot = 0;
        for (uniform int p=0; p<3; p++)
            dot += (block[p*16+y*4+x]-dc[p])*axis[p];
        min_dot = min(min_dot, dot);
        max_dot = max(max_dot, dot);
    }
    if (max_dot-min_dot < 1.0f)
    {
        min_dot -= 0.5f;
        max_dot += 0.5f;
    }
    float norm_sq = 0;
    for (uniform int p=0; p<3; p++)
        norm_sq += axis[p]*axis[p];
    float rnorm_sq = rcp(norm_sq);
    for (uniform int p=0; p<3; p++)
    {
        c0[p] = clamp(dc[p]+min_dot*rnorm_sq*axis[p], 0, 255);
        c1[p] = clamp(dc[p]+max_dot*rnorm_sq*axis[p], 0, 255);
    }
}

inline uint32 fast_quant(float block[48], int p0, int p1)
{
    float c0[3];
    float c1[3];
    dec_rgb565(c0, p0);
    dec_rgb565(c1, p1);
    float dir[3];
    for (uniform int p=0; p<3; p++) dir[p] = c1[p]-c0[p];
    float sq_norm = 0;
    for (uniform int p=0; p<3; p++) sq_norm += sq(dir[p]);
    float rsq_norm = rcp(sq_norm);
    for (uniform int p=0; p<3; p++) dir[p] *= rsq_norm*3;
    float bias = 0.5f;
    for (uniform int p=0; p<3; p++) bias -= c0[p]*dir[p];
    uint32 bits = 0;
    uint32 scaler = 1;
    for (uniform int k=0; k<16; k++)
    {
        float dot = 0;
        for (uniform int p=0; p<3; p++)
            dot += block[k+p*16]*dir[p];
        int q = clamp((int)(dot+bias), 0, 3);
        //bits += q<<(k*2);
        bits += q*scaler;
        scaler *= 4;
    }
    return bits;
}
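
// fast_quant projects each pixel onto the c0->c1 axis: dir is pre-scaled
// by 3/|c1-c0|^2 and bias folds in both the -c0 offset and a +0.5
// rounding term, so dot+bias lands directly in 0..3 and yields the 2-bit
// index stored at bits 2k.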
inline void compute_covar_dc(float covar[6], float dc[3], float block[48])
{
    for (uniform int i=0; i<6; i++) covar[i] = 0;
    for (uniform int p=0; p<3; p++) dc[p] = 0;
    for (uniform int k=0; k<16; k++)
    {
        for (uniform int p=0; p<3; p++)
            dc[p] += block[k+p*16];
    }
    for (uniform int p=0; p<3; p++) dc[p] /= 16;
    for (uniform int k=0; k<16; k++)
    {
        float rgb[3];
        for (uniform int p=0; p<3; p++)
            rgb[p] = block[k+p*16]-dc[p];
        covar[0] += rgb[0]*rgb[0];
        covar[1] += rgb[0]*rgb[1];
        covar[2] += rgb[0]*rgb[2];
        covar[3] += rgb[1]*rgb[1];
        covar[4] += rgb[1]*rgb[2];
        covar[5] += rgb[2]*rgb[2];
    }
}

// ugly, but makes BC1 compression 20% faster overall
inline void compute_covar_dc_ugly(float covar[6], float dc[3], float block[48])
{
    for (uniform int p=0; p<3; p++)
    {
        float acc = 0;
        for (uniform int k=0; k<16; k++)
            acc += block[k+p*16];
        dc[p] = acc/16;
    }
    float covar0 = 0.0f;
    float covar1 = 0.0f;
    float covar2 = 0.0f;
    float covar3 = 0.0f;
    float covar4 = 0.0f;
    float covar5 = 0.0f;
    for (uniform int k=0; k<16; k++)
    {
        float rgb0, rgb1, rgb2;
        rgb0 = block[k+0*16]-dc[0];
        rgb1 = block[k+1*16]-dc[1];
        rgb2 = block[k+2*16]-dc[2];
        covar0 += rgb0*rgb0;
        covar1 += rgb0*rgb1;
        covar2 += rgb0*rgb2;
        covar3 += rgb1*rgb1;
        covar4 += rgb1*rgb2;
        covar5 += rgb2*rgb2;
    }
    covar[0] = covar0;
    covar[1] = covar1;
    covar[2] = covar2;
    covar[3] = covar3;
    covar[4] = covar4;
    covar[5] = covar5;
}

inline void bc1_refine(int pe[2], float block[48], uint32 bits, float dc[3])
{
    float c0[3];
    float c1[3];
    if ((bits ^ (bits*4)) < 4)
    {
        // single color
        for (uniform int p=0; p<3; p++)
        {
            c0[p] = dc[p];
            c1[p] = dc[p];
        }
    }
    else
    {
        float Atb1[3] = {0,0,0};
        float sum_q = 0;
        float sum_qq = 0;
        uint32 shifted_bits = bits;
        for (uniform int k=0; k<16; k++)
        {
            float q = (int)(shifted_bits&3);
            shifted_bits >>= 2;
            float x = 3-q;
            float y = q;
            sum_q += q;
            sum_qq += q*q;
            for (uniform int p=0; p<3; p++) Atb1[p] += x*block[k+p*16];
        }
        float sum[3];
        float Atb2[3];
        for (uniform int p=0; p<3; p++)
        {
            sum[p] = dc[p]*16;
            Atb2[p] = 3*sum[p]-Atb1[p];
        }
        float Cxx = 16*sq(3)-2*3*sum_q+sum_qq;
        float Cyy = sum_qq;
        float Cxy = 3*sum_q-sum_qq;
        float scale = 3.0f * rcp(Cxx*Cyy - Cxy*Cxy);
        for (uniform int p=0; p<3; p++)
        {
            c0[p] = (Atb1[p]*Cyy - Atb2[p]*Cxy)*scale;
            c1[p] = (Atb2[p]*Cxx - Atb1[p]*Cxy)*scale;
            c0[p] = clamp(c0[p], 0, 255);
            c1[p] = clamp(c1[p], 0, 255);
        }
    }
    pe[0] = enc_rgb565(c0);
    pe[1] = enc_rgb565(c1);
}
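
// bc1_refine solves a 2x2 least-squares system for the endpoints with the
// indices held fixed: each pixel is modeled as (x*c0 + y*c1)/3 with
// x=3-q, y=q, so Cxx=sum(x^2), Cyy=sum(y^2), Cxy=sum(x*y), and c0/c1 fall
// out of the normal equations via Cramer's rule. The (bits ^ bits*4) < 4
// test detects the case where all 16 indices are equal, which would make
// the system singular.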
inline uint32 fix_qbits(uint32 qbits)
{
    uniform const uint32 mask_01b = 0x55555555;
    uniform const uint32 mask_10b = 0xAAAAAAAA;
    uint32 qbits0 = qbits&mask_01b;
    uint32 qbits1 = qbits&mask_10b;
    qbits = (qbits1>>1) + (qbits1 ^ (qbits0<<1));
    return qbits;
}
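
// fix_qbits remaps all 16 indices at once from the quantizer's linear
// ordering (0..3 along the c0->c1 axis) to BC1's index semantics
// (0=c0, 1=c1, 2=2/3*c0+1/3*c1, 3=1/3*c0+2/3*c1):
// per 2-bit pair it maps 0->0, 1->2, 2->3, 3->1.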
inline void CompressBlockBC1_core(float block[48], uint32 data[2])
{
    uniform const int powerIterations = 4;
    uniform const int refineIterations = 1;
    float covar[6];
    float dc[3];
    compute_covar_dc_ugly(covar, dc, block);
    float eps = 0.001f;
    covar[0] += eps;
    covar[3] += eps;
    covar[5] += eps;
    float axis[3];
    compute_axis3(axis, covar, powerIterations);
    float c0[3];
    float c1[3];
    pick_endpoints(c0, c1, block, axis, dc);
    int p[2];
    p[0] = enc_rgb565(c0);
    p[1] = enc_rgb565(c1);
    if (p[0]<p[1]) swap_ints(&p[0], &p[1], 1);
    data[0] = (1<<16)*p[1]+p[0];
    data[1] = fast_quant(block, p[0], p[1]);
    // refine
    for (uniform int i=0; i<refineIterations; i++)
    {
        bc1_refine(p, block, data[1], dc);
        if (p[0]<p[1]) swap_ints(&p[0], &p[1], 1);
        data[0] = (1<<16)*p[1]+p[0];
        data[1] = fast_quant(block, p[0], p[1]);
    }
    data[1] = fix_qbits(data[1]);
}

inline void CompressBlockBC3_alpha(float block[16], uint32 data[2])
{
    float ep[2] = { 255, 0 };
    for (uniform int k=0; k<16; k++)
    {
        ep[0] = min(ep[0], block[k]);
        ep[1] = max(ep[1], block[k]);
    }
    if (ep[0] == ep[1]) ep[1] = ep[0]+0.1f;
    uint32 qblock[2] = { 0, 0 };
    float scale = 7.0f/(ep[1]-ep[0]);
    for (uniform int k=0; k<16; k++)
    {
        float v = block[k];
        float proj = (v-ep[0])*scale+0.5f;
        int q = clamp((int)proj, 0, 7);
        q = 7-q;
        if (q > 0) q++;
        if (q==8) q = 1;
        qblock[k/8] |= q << ((k%8)*3);
    }
    // (could be improved by refinement)
    data[0] = clamp((int)ep[0], 0, 255)*256+clamp((int)ep[1], 0, 255);
    data[0] |= qblock[0]<<16;
    data[1] = qblock[0]>>16;
    data[1] |= qblock[1]<<8;
}
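
// BC3 alpha uses the 8-interpolant mode (alpha0 > alpha1): the q remap
// (7-q, then +1 with 8 wrapping to 1) converts the linear quantizer value
// into BC3 index semantics, where index 0 = alpha0 (the max endpoint),
// index 1 = alpha1 (the min) and 2..7 interpolate between them; the two
// dwords then pack the endpoint pair (16 bits) plus 16 x 3-bit indices.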
inline void CompressBlockBC1(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[])
{
    float block[48];
    uint32 data[2];
    load_block_interleaved(block, src, xx, yy);
    CompressBlockBC1_core(block, data);
    store_data(dst, src->width, xx, yy, data, 2);
}

inline void CompressBlockBC3(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[])
{
    float block[64];
    uint32 data[4];
    load_block_interleaved_rgba(block, src, xx, yy);
    CompressBlockBC3_alpha(&block[48], &data[0]);
    CompressBlockBC1_core(block, &data[2]);
    store_data(dst, src->width, xx, yy, data, 4);
}

export void CompressBlocksBC1_ispc(uniform rgba_surface src[], uniform uint8 dst[])
{
    uniform const int h_4 = src->height/4, w_4 = src->width/4;
    for (uniform int yy = 0; yy<h_4; yy++)
    for (uniform int xx = 0; xx<w_4; xx++) // foreach (xx = 0 ... src->width/4)
    {
        CompressBlockBC1(src, xx, yy, dst);
    }
}

export void CompressBlocksBC3_ispc(uniform rgba_surface src[], uniform uint8 dst[])
{
    uniform const int h_4 = src->height/4, w_4 = src->width/4;
    for (uniform int yy = 0; yy<h_4; yy++)
    for (uniform int xx = 0; xx<w_4; xx++) // foreach (xx = 0 ... src->width/4)
    {
        CompressBlockBC3(src, xx, yy, dst);
    }
}
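
// A minimal usage sketch (commented out; the caller supplies the pixel
// buffer and output allocation -- "pixels" and "out_bc1" are illustrative
// names, not part of this file):
//
//    rgba_surface src;
//    src.ptr    = pixels;     // 8-bit RGBA, dimensions multiples of 4
//    src.width  = width;
//    src.height = height;
//    src.stride = width*4;    // bytes per row
//    uint8* dst = out_bc1;    // (width/4)*(height/4)*8 bytes for BC1,
//                             // *16 bytes for BC3
//    ispc::CompressBlocksBC1_ispc(&src, dst);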
///////////////////////////////////////////////////////////
// BC7 encoding

/*struct bc7_enc_settings
{
    bool mode_selection[4];
    int refineIterations[8];
    bool skip_mode2;
    int fastSkipTreshold_mode1;
    int fastSkipTreshold_mode3;
    int fastSkipTreshold_mode7;
    int mode45_channel0;
    int refineIterations_channel;
    int channels;
};*/

struct bc7_enc_state
{
    float block[64];
    float opaque_err;       // error for coding alpha=255
    float best_err;
    uint32 best_data[5];    // 4, +1 margin for skips
    // settings
    uniform bool mode_selection[4];
    uniform int refineIterations[8];
    uniform bool skip_mode2;
    uniform int fastSkipTreshold_mode1;
    uniform int fastSkipTreshold_mode3;
    uniform int fastSkipTreshold_mode7;
    uniform int mode45_channel0;
    uniform int refineIterations_channel;
    uniform int channels;
};

struct mode45_parameters
{
    int qep[8];
    uint32 qblock[2];
    int aqep[2];
    uint32 aqblock[2];
    int rotation;
    int swap;
};

void bc7_code_mode01237(uint32 data[5], int qep[6], uint32 qblock[2], int part_id, uniform int mode);
void bc7_code_mode45(uint32 data[5], mode45_parameters params[], uniform int mode);
void bc7_code_mode6(uint32 data[5], int qep[8], uint32 qblock[2]);

///////////////////////////
// BC7 format data

static const int unquant_table_2bits[] = {0, 21, 43, 64};
static const int unquant_table_3bits[] = {0, 9, 18, 27, 37, 46, 55, 64};
static const int unquant_table_4bits[] = {0, 4, 9, 13, 17, 21, 26, 30, 34, 38, 43, 47, 51, 55, 60, 64};
static const int* uniform unquant_tables[] = {unquant_table_2bits, unquant_table_3bits, unquant_table_4bits};

inline uniform const int* uniform get_unquant_table(uniform int bits)
{
    assert(bits>=2 && bits<=4); // invalid bit size
    return unquant_tables[bits-2];
}

static const uint32 pattern_table[] = {
    0x50505050u, 0x40404040u, 0x54545454u, 0x54505040u, 0x50404000u, 0x55545450u, 0x55545040u, 0x54504000u,
    0x50400000u, 0x55555450u, 0x55544000u, 0x54400000u, 0x55555440u, 0x55550000u, 0x55555500u, 0x55000000u,
    0x55150100u, 0x00004054u, 0x15010000u, 0x00405054u, 0x00004050u, 0x15050100u, 0x05010000u, 0x40505054u,
    0x00404050u, 0x05010100u, 0x14141414u, 0x05141450u, 0x01155440u, 0x00555500u, 0x15014054u, 0x05414150u,
    0x44444444u, 0x55005500u, 0x11441144u, 0x05055050u, 0x05500550u, 0x11114444u, 0x41144114u, 0x44111144u,
    0x15055054u, 0x01055040u, 0x05041050u, 0x05455150u, 0x14414114u, 0x50050550u, 0x41411414u, 0x00141400u,
    0x00041504u, 0x00105410u, 0x10541000u, 0x04150400u, 0x50410514u, 0x41051450u, 0x05415014u, 0x14054150u,
    0x41050514u, 0x41505014u, 0x40011554u, 0x54150140u, 0x50505500u, 0x00555050u, 0x15151010u, 0x54540404u,
    0xAA685050u, 0x6A5A5040u, 0x5A5A4200u, 0x5450A0A8u, 0xA5A50000u, 0xA0A05050u, 0x5555A0A0u, 0x5A5A5050u,
    0xAA550000u, 0xAA555500u, 0xAAAA5500u, 0x90909090u, 0x94949494u, 0xA4A4A4A4u, 0xA9A59450u, 0x2A0A4250u,
    0xA5945040u, 0x0A425054u, 0xA5A5A500u, 0x55A0A0A0u, 0xA8A85454u, 0x6A6A4040u, 0xA4A45000u, 0x1A1A0500u,
    0x0050A4A4u, 0xAAA59090u, 0x14696914u, 0x69691400u, 0xA08585A0u, 0xAA821414u, 0x50A4A450u, 0x6A5A0200u,
    0xA9A58000u, 0x5090A0A8u, 0xA8A09050u, 0x24242424u, 0x00AA5500u, 0x24924924u, 0x24499224u, 0x50A50A50u,
    0x500AA550u, 0xAAAA4444u, 0x66660000u, 0xA5A0A5A0u, 0x50A050A0u, 0x69286928u, 0x44AAAA44u, 0x66666600u,
    0xAA444444u, 0x54A854A8u, 0x95809580u, 0x96969600u, 0xA85454A8u, 0x80959580u, 0xAA141414u, 0x96960000u,
    0xAAAA1414u, 0xA05050A0u, 0xA0A5A5A0u, 0x96000000u, 0x40804080u, 0xA9A8A9A8u, 0xAAAAAA44u, 0x2A4A5254u
};
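
// pattern_table packs one partition per entry: 2 bits per pixel (pixel k
// at bits 2k) giving each pixel's subset id. Entries 0..63 are the
// 2-subset partitions, entries 64..127 the 3-subset partitions.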
inline uint32 get_pattern(int part_id)
{
    return gather_uint(pattern_table, part_id);
}

static const uint32 pattern_mask_table[] = {
    0xCCCC3333u, 0x88887777u, 0xEEEE1111u, 0xECC81337u, 0xC880377Fu, 0xFEEC0113u, 0xFEC80137u, 0xEC80137Fu,
    0xC80037FFu, 0xFFEC0013u, 0xFE80017Fu, 0xE80017FFu, 0xFFE80017u, 0xFF0000FFu, 0xFFF0000Fu, 0xF0000FFFu,
    0xF71008EFu, 0x008EFF71u, 0x71008EFFu, 0x08CEF731u, 0x008CFF73u, 0x73108CEFu, 0x3100CEFFu, 0x8CCE7331u,
    0x088CF773u, 0x3110CEEFu, 0x66669999u, 0x366CC993u, 0x17E8E817u, 0x0FF0F00Fu, 0x718E8E71u, 0x399CC663u,
    0xAAAA5555u, 0xF0F00F0Fu, 0x5A5AA5A5u, 0x33CCCC33u, 0x3C3CC3C3u, 0x55AAAA55u, 0x96966969u, 0xA55A5AA5u,
    0x73CE8C31u, 0x13C8EC37u, 0x324CCDB3u, 0x3BDCC423u, 0x69969669u, 0xC33C3CC3u, 0x99666699u, 0x0660F99Fu,
    0x0272FD8Du, 0x04E4FB1Bu, 0x4E40B1BFu, 0x2720D8DFu, 0xC93636C9u, 0x936C6C93u, 0x39C6C639u, 0x639C9C63u,
    0x93366CC9u, 0x9CC66339u, 0x817E7E81u, 0xE71818E7u, 0xCCF0330Fu, 0x0FCCF033u, 0x774488BBu, 0xEE2211DDu,
    0x08CC0133u, 0x8CC80037u, 0xCC80006Fu, 0xEC001331u, 0x330000FFu, 0x00CC3333u, 0xFF000033u, 0xCCCC0033u,
    0x0F0000FFu, 0x0FF0000Fu, 0x00F0000Fu, 0x44443333u, 0x66661111u, 0x22221111u, 0x136C0013u, 0x008C8C63u,
    0x36C80137u, 0x08CEC631u, 0x3330000Fu, 0xF0000333u, 0x00EE1111u, 0x88880077u, 0x22C0113Fu, 0x443088CFu,
    0x0C22F311u, 0x03440033u, 0x69969009u, 0x9960009Fu, 0x03303443u, 0x00660699u, 0xC22C3113u, 0x8C0000EFu,
    0x1300007Fu, 0xC4003331u, 0x004C1333u, 0x22229999u, 0x00F0F00Fu, 0x24929249u, 0x29429429u, 0xC30C30C3u,
    0xC03C3C03u, 0x00AA0055u, 0xAA0000FFu, 0x30300303u, 0xC0C03333u, 0x90900909u, 0xA00A5005u, 0xAAA0000Fu,
    0x0AAA0555u, 0xE0E01111u, 0x70700707u, 0x6660000Fu, 0x0EE01111u, 0x07707007u, 0x06660999u, 0x660000FFu,
    0x00660099u, 0x0CC03333u, 0x03303003u, 0x60000FFFu, 0x80807777u, 0x10100101u, 0x000A0005u, 0x08CE8421u
};
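
// pattern_mask_table stores two 16-bit pixel masks per partition: the low
// half selects subset 0, the high half subset 1; for 3-subset partitions
// subset 2 is the complement of both (see get_pattern_mask below).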
inline int get_pattern_mask(int part_id, int j)
{
    uint32 mask_packed = gather_uint(pattern_mask_table, part_id);
    int mask0 = mask_packed&0xFFFF;
    int mask1 = mask_packed>>16;
    int mask = (j==2) ? (~mask0)&(~mask1) : ( (j==0) ? mask0 : mask1 );
    return mask;
}

static const int skip_table[] = {
    0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u,
    0xf0u, 0x20u, 0x80u, 0x20u, 0x20u, 0x80u, 0x80u, 0xf0u, 0x20u, 0x80u, 0x20u, 0x20u, 0x80u, 0x80u, 0x20u, 0x20u,
    0xf0u, 0xf0u, 0x60u, 0x80u, 0x20u, 0x80u, 0xf0u, 0xf0u, 0x20u, 0x80u, 0x20u, 0x20u, 0x20u, 0xf0u, 0xf0u, 0x60u,
    0x60u, 0x20u, 0x60u, 0x80u, 0xf0u, 0xf0u, 0x20u, 0x20u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0x20u, 0x20u, 0xf0u,
    0x3fu, 0x38u, 0xf8u, 0xf3u, 0x8fu, 0x3fu, 0xf3u, 0xf8u, 0x8fu, 0x8fu, 0x6fu, 0x6fu, 0x6fu, 0x5fu, 0x3fu, 0x38u,
    0x3fu, 0x38u, 0x8fu, 0xf3u, 0x3fu, 0x38u, 0x6fu, 0xa8u, 0x53u, 0x8fu, 0x86u, 0x6au, 0x8fu, 0x5fu, 0xfau, 0xf8u,
    0x8fu, 0xf3u, 0x3fu, 0x5au, 0x6au, 0xa8u, 0x89u, 0xfau, 0xf6u, 0x3fu, 0xf8u, 0x5fu, 0xf3u, 0xf6u, 0xf6u, 0xf8u,
    0x3fu, 0xf3u, 0x5fu, 0x5fu, 0x5fu, 0x8fu, 0x5fu, 0xafu, 0x5fu, 0xafu, 0x8fu, 0xdfu, 0xf3u, 0xcfu, 0x3fu, 0x38u
};

inline void get_skips(int skips[3], int part_id)
{
    int skip_packed = gather_int(skip_table, part_id);
    skips[0] = 0;
    skips[1] = skip_packed>>4;
    skips[2] = skip_packed&15;
}
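
// Each skip_table entry packs the "anchor" (fix-up) pixel indices whose
// index MSB is implicitly zero in the encoded bit stream: the high nibble
// is the anchor of subset 1, the low nibble that of subset 2; subset 0's
// anchor is always pixel 0.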
///////////////////////////
// PCA helpers

inline void compute_stats_masked(float stats[15], float block[64], int mask, uniform int channels)
{
    for (uniform int i=0; i<15; i++) stats[i] = 0;
    int mask_shifted = mask<<1;
    for (uniform int k=0; k<16; k++)
    {
        mask_shifted >>= 1;
        //if ((mask_shifted&1) == 0) continue;
        int flag = (mask_shifted&1);
        float rgba[4];
        for (uniform int p=0; p<channels; p++) rgba[p] = block[k+p*16];
        for (uniform int p=0; p<channels; p++) rgba[p] *= flag;
        stats[14] += flag;
        stats[10] += rgba[0];
        stats[11] += rgba[1];
        stats[12] += rgba[2];
        stats[0] += rgba[0]*rgba[0];
        stats[1] += rgba[0]*rgba[1];
        stats[2] += rgba[0]*rgba[2];
        stats[4] += rgba[1]*rgba[1];
        stats[5] += rgba[1]*rgba[2];
        stats[7] += rgba[2]*rgba[2];
        if (channels==4)
        {
            stats[13] += rgba[3];
            stats[3] += rgba[0]*rgba[3];
            stats[6] += rgba[1]*rgba[3];
            stats[8] += rgba[2]*rgba[3];
            stats[9] += rgba[3]*rgba[3];
        }
    }
}

inline void covar_from_stats(float covar[10], float stats[15], uniform int channels)
{
    covar[0] = stats[0] - stats[10+0]*stats[10+0]/stats[14];
    covar[1] = stats[1] - stats[10+0]*stats[10+1]/stats[14];
    covar[2] = stats[2] - stats[10+0]*stats[10+2]/stats[14];
    covar[4] = stats[4] - stats[10+1]*stats[10+1]/stats[14];
    covar[5] = stats[5] - stats[10+1]*stats[10+2]/stats[14];
    covar[7] = stats[7] - stats[10+2]*stats[10+2]/stats[14];
    if (channels == 4)
    {
        covar[3] = stats[3] - stats[10+0]*stats[10+3]/stats[14];
        covar[6] = stats[6] - stats[10+1]*stats[10+3]/stats[14];
        covar[8] = stats[8] - stats[10+2]*stats[10+3]/stats[14];
        covar[9] = stats[9] - stats[10+3]*stats[10+3]/stats[14];
    }
}
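
// covar_from_stats applies covariance = sum(x*y) - sum(x)*sum(y)/n to the
// accumulated raw moments (stats[0..9] = channel products, stats[10..13]
// = channel sums, stats[14] = pixel count), yielding n times the true
// covariance matrix, which is fine since only the axis direction matters.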
inline void compute_covar_dc_masked(float covar[6], float dc[3], float block[64], int mask, uniform int channels)
{
    float stats[15];
    compute_stats_masked(stats, block, mask, channels);
    covar_from_stats(covar, stats, channels);
    for (uniform int p=0; p<channels; p++) dc[p] = stats[10+p]/stats[14];
}

void block_pca_axis(float axis[4], float dc[4], float block[64], int mask, uniform int channels)
{
    uniform const int powerIterations = 8; // 4 not enough for HQ
    float covar[10];
    compute_covar_dc_masked(covar, dc, block, mask, channels);
    //float var = covar[0] + covar[4] + covar[7] + covar[9] + 256;
    float inv_var = 1.0f / (256 * 256);
    for (uniform int k = 0; k < 10; k++)
    {
        covar[k] *= inv_var;
    }
    float eps = sq(0.001f);
    covar[0] += eps;
    covar[4] += eps;
    covar[7] += eps;
    covar[9] += eps;
    compute_axis(axis, covar, powerIterations, channels);
}

void block_segment_core(float ep[], float block[64], int mask, uniform int channels)
{
    float axis[4];
    float dc[4];
    block_pca_axis(axis, dc, block, mask, channels);
    float ext[2];
    ext[0] = +FLT_MAX;
    ext[1] = -FLT_MAX;
    // find min/max
    int mask_shifted = mask<<1;
    for (uniform int k=0; k<16; k++)
    {
        mask_shifted >>= 1;
        if ((mask_shifted&1) == 0) continue;
        float dot = 0;
        for (uniform int p=0; p<channels; p++)
            dot += axis[p]*(block[16*p+k]-dc[p]);
        ext[0] = min(ext[0], dot);
        ext[1] = max(ext[1], dot);
    }
    // create some distance if the endpoints collapse
    if (ext[1]-ext[0] < 1.0f)
    {
        ext[0] -= 0.5f;
        ext[1] += 0.5f;
    }
    for (uniform int i=0; i<2; i++)
    for (uniform int p=0; p<channels; p++)
    {
        ep[4*i+p] = ext[i]*axis[p]+dc[p];
    }
}

void block_segment(float ep[], float block[64], int mask, uniform int channels)
{
    block_segment_core(ep, block, mask, channels);
    for (uniform int i=0; i<2; i++)
    for (uniform int p=0; p<channels; p++)
    {
        ep[4*i+p] = clamp(ep[4*i+p], 0, 255);
    }
}

float get_pca_bound(float covar[10], uniform int channels)
{
    uniform const int powerIterations = 4; // quite approximative, but enough for bounding
    float inv_var = 1.0f / (256 * 256);
    for (uniform int k = 0; k < 10; k++)
    {
        covar[k] *= inv_var;
    }
    float eps = sq(0.001f);
    covar[0] += eps;
    covar[4] += eps;
    covar[7] += eps;
    float axis[4];
    compute_axis(axis, covar, powerIterations, channels);
    float vec[4];
    if (channels == 3) ssymv3(vec, covar, axis);
    if (channels == 4) ssymv4(vec, covar, axis);
    float sq_sum = 0.0f;
    for (uniform int p=0; p<channels; p++) sq_sum += sq(vec[p]);
    float lambda = sqrt(sq_sum);
    float bound = covar[0]+covar[4]+covar[7];
    if (channels == 4) bound += covar[9];
    bound -= lambda;
    bound = max(bound, 0);
    return bound;
}

float block_pca_bound(float block[64], int mask, uniform int channels)
{
    float stats[15];
    compute_stats_masked(stats, block, mask, channels);
    float covar[10];
    covar_from_stats(covar, stats, channels);
    return get_pca_bound(covar, channels);
}

float block_pca_bound_split(float block[64], int mask, float full_stats[15], uniform int channels)
{
    float stats[15];
    compute_stats_masked(stats, block, mask, channels);
    float covar1[10];
    covar_from_stats(covar1, stats, channels);
    for (uniform int i=0; i<15; i++)
        stats[i] = full_stats[i] - stats[i];
    float covar2[10];
    covar_from_stats(covar2, stats, channels);
    float bound = 0.0f;
    bound += get_pca_bound(covar1, channels);
    bound += get_pca_bound(covar2, channels);
    return sqrt(bound)*256;
}

///////////////////////////
// endpoint quantization

inline int unpack_to_byte(int v, uniform const int bits)
{
    assert(bits >= 4);
    int vv = v<<(8-bits);
    return vv + shift_right(vv, bits);
}
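
// unpack_to_byte dequantizes by bit replication: v << (8-bits) places the
// value in the high bits and shift_right(vv, bits) copies its top bits
// into the low bits, so 0 maps to 0 and the maximum maps to 255
// (e.g. 5-bit 31 -> 248 + 7 = 255).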
void ep_quant0367(int qep[], float ep[], uniform int mode, uniform int channels)
{
    uniform int bits = 7;
    if (mode == 0) bits = 4;
    if (mode == 7) bits = 5;
    uniform int levels = 1 << bits;
    uniform int levels2 = levels*2-1;
    for (uniform int i=0; i<2; i++)
    {
        int qep_b[8];
        for (uniform int b=0; b<2; b++)
        for (uniform int p=0; p<4; p++)
        {
            int v = (int)((ep[i*4+p]/255.0f*levels2-b)/2+0.5f)*2+b;
            qep_b[b*4+p] = clamp(v, b, levels2-1+b);
        }
        float ep_b[8];
        for (uniform int j=0; j<8; j++)
            ep_b[j] = qep_b[j];
        if (mode==0)
            for (uniform int j=0; j<8; j++)
                ep_b[j] = unpack_to_byte(qep_b[j], 5);
        float err0 = 0.0f;
        float err1 = 0.0f;
        for (uniform int p=0; p<channels; p++)
        {
            err0 += sq(ep[i*4+p]-ep_b[0+p]);
            err1 += sq(ep[i*4+p]-ep_b[4+p]);
        }
        // ESENTHEL CHANGED: BC7 can quantize an endpoint in two ways: #1 as-is,
        // or #2 with an extra "0.5*levels" half-step added to all channels at
        // once (one extra bit of precision, but shared by every channel). If
        // alpha is 0 while the RGB channels happen to have a smaller error
        // with the +0.5 variant, alpha would receive the +0.5 too and complete
        // transparency would be destroyed, so always force variant #1 when
        // alpha = 0:
        if (channels==4 && ep[i*4+3]<=0.5f) err0 = -1;
        for (uniform int p=0; p<4; p++)
            qep[i*4+p] = (err0<err1) ? qep_b[0+p] : qep_b[4+p];
    }
}
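
// The b=0/b=1 pair above corresponds to BC7's p-bit: each endpoint is
// quantized once with the appended low bit forced to 0 and once forced to
// 1, and the variant with the lower squared error across the channels is
// kept.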
void ep_quant1(int qep[], float ep[], uniform int mode)
{
    int qep_b[16];
    for (uniform int b=0; b<2; b++)
    for (uniform int i=0; i<8; i++)
    {
        int v = ((int)((ep[i]/255.0f*127.0f-b)/2+0.5f))*2+b;
        qep_b[b*8+i] = clamp(v, b, 126+b);
    }
    // dequant
    float ep_b[16];
    for (uniform int k=0; k<16; k++)
        ep_b[k] = unpack_to_byte(qep_b[k], 7);
    float err0 = 0.0f;
    float err1 = 0.0f;
    for (uniform int j = 0; j < 2; j++)
    for (uniform int p = 0; p < 3; p++)
    {
        err0 += sq(ep[j*4+p] - ep_b[0+j*4+p]);
        err1 += sq(ep[j*4+p] - ep_b[8+j*4+p]);
    }
    for (uniform int i=0; i<8; i++)
        qep[i] = (err0<err1) ? qep_b[0+i] : qep_b[8+i];
}

void ep_quant245(int qep[], float ep[], uniform int mode)
{
    uniform int bits = 5;
    if (mode == 5) bits = 7;
    uniform int levels = 1 << bits;
    for (uniform int i=0; i<8; i++)
    {
        int v = ((int)(ep[i]/255.0f*(levels-1)+0.5f));
        qep[i] = clamp(v, 0, levels-1);
    }
}

static const int pairs_table[] = {3,2,3,2,1,1,1,2};

void ep_quant(int qep[], float ep[], uniform int mode, uniform int channels)
{
    assert(mode <= 7);
    uniform const int pairs = pairs_table[mode];
    if (mode == 0 || mode == 3 || mode == 6 || mode == 7)
    {
        for (uniform int i=0; i<pairs; i++)
            ep_quant0367(&qep[i*8], &ep[i*8], mode, channels);
    }
    else if (mode == 1)
    {
        for (uniform int i=0; i<pairs; i++)
            ep_quant1(&qep[i*8], &ep[i*8], mode);
    }
    else if (mode == 2 || mode == 4 || mode == 5)
    {
        for (uniform int i=0; i<pairs; i++)
            ep_quant245(&qep[i*8], &ep[i*8], mode);
    }
    else
        assert(false);
}

void ep_dequant(float ep[], int qep[], uniform int mode)
{
    assert(mode <= 7);
    uniform const int pairs = pairs_table[mode];
    // mode 3, 6 are 8-bit
    if (mode == 3 || mode == 6)
    {
        for (uniform int i=0; i<8*pairs; i++)
            ep[i] = qep[i];
    }
    else if (mode == 1 || mode == 5)
    {
        for (uniform int i=0; i<8*pairs; i++)
            ep[i] = unpack_to_byte(qep[i], 7);
    }
    else if (mode == 0 || mode == 2 || mode == 4)
    {
        for (uniform int i=0; i<8*pairs; i++)
            ep[i] = unpack_to_byte(qep[i], 5);
    }
    else if (mode == 7)
    {
        for (uniform int i=0; i<8*pairs; i++)
            ep[i] = unpack_to_byte(qep[i], 6);
    }
    else
        assert(false);
}

void ep_quant_dequant(int qep[], float ep[], uniform int mode, uniform int channels)
{
    ep_quant(qep, ep, mode, channels);
    ep_dequant(ep, qep, mode);
}
///////////////////////////
// pixel quantization

float block_quant(uint32 qblock[2], float block[64], uniform int bits, float ep[], uint32 pattern, uniform int channels)
{
    float total_err = 0;
    uniform const int* uniform unquant_table = get_unquant_table(bits);
    int levels = 1 << bits;
    // 64-bit qblock: 5% overhead in this function
    for (uniform int k=0; k<2; k++) qblock[k] = 0;
    int pattern_shifted = pattern;
    for (uniform int k=0; k<16; k++)
    {
        int j = pattern_shifted&3;
        pattern_shifted >>= 2;
        float proj = 0;
        float div = 0;
        for (uniform int p=0; p<channels; p++)
        {
            float ep_a = gather_float(ep, 8*j+0+p);
            float ep_b = gather_float(ep, 8*j+4+p);
            proj += (block[k+p*16]-ep_a)*(ep_b-ep_a);
            div += sq(ep_b-ep_a);
        }
        proj /= div;
        int q1 = (int)(proj*levels+0.5f);
        q1 = clamp(q1, 1, levels-1);
        float err0 = 0;
        float err1 = 0;
        int w0 = gather_int(unquant_table, q1-1);
        int w1 = gather_int(unquant_table, q1);
        for (uniform int p=0; p<channels; p++)
        {
            float ep_a = gather_float(ep, 8*j+0+p);
            float ep_b = gather_float(ep, 8*j+4+p);
            float dec_v0 = (int)(((64-w0)*ep_a + w0*ep_b + 32)/64);
            float dec_v1 = (int)(((64-w1)*ep_a + w1*ep_b + 32)/64);
            err0 += sq(dec_v0 - block[k+p*16]);
            err1 += sq(dec_v1 - block[k+p*16]);
        }
        float best_err = err1;
        int best_q = q1;
        if (err0<err1)
        {
            best_err = err0;
            best_q = q1-1;
        }
        assert(best_q>=0 && best_q<=levels-1);
        qblock[k/8] += ((uint32)best_q) << 4*(k%8);
        total_err += best_err;
    }
    return total_err;
}
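
// block_quant mirrors the decoder: BC7 reconstructs each channel as
// ((64-w)*e0 + w*e1 + 32) >> 6 with w taken from the unquant table, so
// the candidate index q1 and its neighbor q1-1 are both decoded and the
// one with the smaller squared error wins; indices are packed 4 bits each
// (8 per dword) in this intermediate form.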
///////////////////////////
// LS endpoint refinement

void opt_endpoints(float ep[], float block[64], uniform int bits, uint32 qblock[2], int mask, uniform int channels)
{
    uniform int levels = 1 << bits;
    float Atb1[4] = {0,0,0,0};
    float sum_q = 0;
    float sum_qq = 0;
    float sum[5] = {0,0,0,0,0};
    int mask_shifted = mask<<1;
    for (uniform int k1=0; k1<2; k1++)
    {
        uint32 qbits_shifted = qblock[k1];
        for (uniform int k2=0; k2<8; k2++)
        {
            uniform int k = k1*8+k2;
            float q = (int)(qbits_shifted&15);
            qbits_shifted >>= 4;
            mask_shifted >>= 1;
            if ((mask_shifted&1) == 0) continue;
            int x = (levels-1)-q;
            int y = q;
            sum_q += q;
            sum_qq += q*q;
            sum[4] += 1;
            for (uniform int p=0; p<channels; p++) sum[p] += block[k+p*16];
            for (uniform int p=0; p<channels; p++) Atb1[p] += x*block[k+p*16];
        }
    }
    float Atb2[4];
    for (uniform int p=0; p<channels; p++)
    {
        //sum[p] = dc[p]*16;
        Atb2[p] = (levels-1)*sum[p]-Atb1[p];
    }
    float Cxx = sum[4]*sq(levels-1)-2*(levels-1)*sum_q+sum_qq;
    float Cyy = sum_qq;
    float Cxy = (levels-1)*sum_q-sum_qq;
    float scale = (levels-1) / (Cxx*Cyy - Cxy*Cxy);
    for (uniform int p=0; p<channels; p++)
    {
        ep[0+p] = (Atb1[p]*Cyy - Atb2[p]*Cxy)*scale;
        ep[4+p] = (Atb2[p]*Cxx - Atb1[p]*Cxy)*scale;
        //ep[0+p] = clamp(ep[0+p], 0, 255);
        //ep[4+p] = clamp(ep[4+p], 0, 255);
    }
    if (abs(Cxx*Cyy - Cxy*Cxy) < 0.001f)
    {
        // flatten
        for (uniform int p=0; p<channels; p++)
        {
            ep[0+p] = sum[p]/sum[4];
            ep[4+p] = ep[0+p];
        }
    }
}
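
// opt_endpoints is the multi-level generalization of bc1_refine: with the
// indices fixed, each masked pixel is modeled as (x*e0 + y*e1)/(levels-1)
// with x=(levels-1)-q and y=q, and the endpoints fall out of the 2x2
// normal equations; when the determinant is near zero all selected pixels
// share one index, so both endpoints collapse to the average.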
  1050. //////////////////////////
  1051. // parameter estimation
  1052. float compute_opaque_err(float block[64], uniform int channels)
  1053. {
  1054. if (channels == 3) return 0;
  1055. float err = 0.0f;
  1056. for (uniform int k=0; k<16; k++)
  1057. {
  1058. err += sq(block[48+k]-255);
  1059. }
  1060. return err;
  1061. }
float bc7_enc_mode01237_part_fast(int qep[24], uint32 qblock[2], float block[64], int part_id, uniform int mode)
{
    uint32 pattern = get_pattern(part_id);
    uniform int bits = 2;     if (mode == 0 || mode == 1) bits = 3;
    uniform int pairs = 2;    if (mode == 0 || mode == 2) pairs = 3;
    uniform int channels = 3; if (mode == 7) channels = 4;

    float ep[24];
    for (uniform int j=0; j<pairs; j++)
    {
        int mask = get_pattern_mask(part_id, j);
        block_segment(&ep[j*8], block, mask, channels);
    }

    ep_quant_dequant(qep, ep, mode, channels);

    float total_err = block_quant(qblock, block, bits, ep, pattern, channels);
    return total_err;
}

void bc7_enc_mode01237(bc7_enc_state state[], uniform int mode, int part_list[], uniform int part_count)
{
    if (part_count == 0) return;

    uniform int bits = 2;     if (mode == 0 || mode == 1) bits = 3;
    uniform int pairs = 2;    if (mode == 0 || mode == 2) pairs = 3;
    uniform int channels = 3; if (mode == 7) channels = 4;

    int best_qep[24];
    uint32 best_qblock[2];
    int best_part_id = -1;
    float best_err = FLT_MAX;

    for (uniform int part=0; part<part_count; part++)
    {
        int part_id = part_list[part]&63;
        if (pairs == 3) part_id += 64;

        int qep[24];
        uint32 qblock[2];
        float err = bc7_enc_mode01237_part_fast(qep, qblock, state->block, part_id, mode);

        if (err<best_err)
        {
            for (uniform int i=0; i<8*pairs; i++) best_qep[i] = qep[i];
            for (uniform int k=0; k<2; k++) best_qblock[k] = qblock[k];
            best_part_id = part_id;
            best_err = err;
        }
    }

    // refine
    uniform int refineIterations = state->refineIterations[mode];
    for (uniform int _=0; _<refineIterations; _++)
    {
        float ep[24];
        for (uniform int j=0; j<pairs; j++)
        {
            int mask = get_pattern_mask(best_part_id, j);
            opt_endpoints(&ep[j*8], state->block, bits, best_qblock, mask, channels);
        }

        int qep[24];
        uint32 qblock[2];
        ep_quant_dequant(qep, ep, mode, channels);

        uint32 pattern = get_pattern(best_part_id);
        float err = block_quant(qblock, state->block, bits, ep, pattern, channels);

        if (err<best_err)
        {
            for (uniform int i=0; i<8*pairs; i++) best_qep[i] = qep[i];
            for (uniform int k=0; k<2; k++) best_qblock[k] = qblock[k];
            best_err = err;
        }
    }

    if (mode != 7) best_err += state->opaque_err; // take into account alpha channel

    if (best_err<state->best_err)
    {
        state->best_err = best_err;
        bc7_code_mode01237(state->best_data, best_qep, best_qblock, best_part_id, mode);
    }
}
void partial_sort_list(int list[], uniform int length, uniform int partial_count)
{
    for (uniform int k=0; k<partial_count; k++)
    {
        int best_idx = k;
        int best_value = list[k];
        for (uniform int i=k+1; i<length; i++)
        {
            if (best_value > list[i])
            {
                best_value = list[i];
                best_idx = i;
            }
        }

        // swap
        scatter_int(list, best_idx, list[k]);
        list[k] = best_value;
    }
}
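/*
Illustrative note (not part of the original source): this is a partial
selection sort; only the first partial_count entries end up sorted, which is
all the fast-skip searches need. Callers pack each candidate as
(pca_bound<<6)|part_id before sorting, so sorting ascending on the combined
key orders partitions by their PCA error bound, cheapest first, and the
partition id is recovered later with &63 (or &31 for BC6H).
*/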
void bc7_enc_mode02(bc7_enc_state state[])
{
    int part_list[64];
    for (uniform int part=0; part<64; part++)
        part_list[part] = part;

    bc7_enc_mode01237(state, 0, part_list, 16);
    if (!state->skip_mode2) bc7_enc_mode01237(state, 2, part_list, 64); // usually not worth the time
}

void bc7_enc_mode13(bc7_enc_state state[])
{
    if (state->fastSkipTreshold_mode1 == 0 && state->fastSkipTreshold_mode3 == 0) return;

    float full_stats[15];
    compute_stats_masked(full_stats, state->block, -1, 3);

    int part_list[64];
    for (uniform int part=0; part<64; part++)
    {
        int mask = get_pattern_mask(part+0, 0);
        float bound12 = block_pca_bound_split(state->block, mask, full_stats, 3);
        int bound = (int)(bound12);
        part_list[part] = part+bound*64;
    }

    partial_sort_list(part_list, 64, max(state->fastSkipTreshold_mode1, state->fastSkipTreshold_mode3));
    bc7_enc_mode01237(state, 1, part_list, state->fastSkipTreshold_mode1);
    bc7_enc_mode01237(state, 3, part_list, state->fastSkipTreshold_mode3);
}

void bc7_enc_mode7(bc7_enc_state state[])
{
    if (state->fastSkipTreshold_mode7 == 0) return;

    float full_stats[15];
    compute_stats_masked(full_stats, state->block, -1, state->channels);

    int part_list[64];
    for (uniform int part=0; part<64; part++)
    {
        int mask = get_pattern_mask(part+0, 0);
        float bound12 = block_pca_bound_split(state->block, mask, full_stats, state->channels);
        int bound = (int)(bound12);
        part_list[part] = part+bound*64;
    }

    partial_sort_list(part_list, 64, state->fastSkipTreshold_mode7);
    bc7_enc_mode01237(state, 7, part_list, state->fastSkipTreshold_mode7);
}

void channel_quant_dequant(int qep[2], float ep[2], uniform int epbits)
{
    int elevels = (1<<epbits);

    for (uniform int i=0; i<2; i++)
    {
        int v = ((int)(ep[i]/255.0f*(elevels-1)+0.5f));
        qep[i] = clamp(v, 0, elevels-1);
        ep[i] = unpack_to_byte(qep[i], epbits);
    }
}

void channel_opt_endpoints(float ep[2], float block[16], uniform int bits, uint32 qblock[2])
{
    uniform int levels = 1 << bits;

    float Atb1 = 0;
    float sum_q = 0;
    float sum_qq = 0;
    float sum = 0;

    for (uniform int k1=0; k1<2; k1++)
    {
        uint32 qbits_shifted = qblock[k1];
        for (uniform int k2=0; k2<8; k2++)
        {
            uniform int k = k1*8+k2;
            float q = (int)(qbits_shifted&15);
            qbits_shifted >>= 4;

            int x = (levels-1)-q;
            int y = q;

            sum_q += q;
            sum_qq += q*q;

            sum += block[k];
            Atb1 += x*block[k];
        }
    }

    float Atb2 = (levels-1)*sum-Atb1;

    float Cxx = 16*sq(levels-1)-2*(levels-1)*sum_q+sum_qq;
    float Cyy = sum_qq;
    float Cxy = (levels-1)*sum_q-sum_qq;
    float scale = (levels-1) / (Cxx*Cyy - Cxy*Cxy);

    ep[0] = (Atb1*Cyy - Atb2*Cxy)*scale;
    ep[1] = (Atb2*Cxx - Atb1*Cxy)*scale;

    ep[0] = clamp(ep[0], 0, 255);
    ep[1] = clamp(ep[1], 0, 255);

    if (abs(Cxx*Cyy - Cxy*Cxy) < 0.001f)
    {
        ep[0] = sum/16;
        ep[1] = ep[0];
    }
}
float channel_opt_quant(uint32 qblock[2], float block[16], uniform int bits, float ep[])
{
    uniform const int* uniform unquant_table = get_unquant_table(bits);
    int levels = (1<<bits);

    qblock[0] = 0;
    qblock[1] = 0;

    float total_err = 0;

    for (uniform int k=0; k<16; k++)
    {
        float proj = (block[k]-ep[0])/(ep[1]-ep[0]+0.001f);

        int q1 = (int)(proj*levels+0.5f);
        q1 = clamp(q1, 1, levels-1);

        float err0 = 0;
        float err1 = 0;
        int w0 = gather_int(unquant_table, q1-1);
        int w1 = gather_int(unquant_table, q1);

        float dec_v0 = (int)(((64-w0)*ep[0] + w0*ep[1] + 32)/64);
        float dec_v1 = (int)(((64-w1)*ep[0] + w1*ep[1] + 32)/64);

        err0 += sq(dec_v0 - block[k]);
        err1 += sq(dec_v1 - block[k]);

        float best_err = err1; // float, not int: the errors are fractional and truncating them skews the comparison
        int best_q = q1;
        if (err0<err1)
        {
            best_err = err0;
            best_q = q1-1;
        }

        qblock[k/8] += ((uint32)best_q) << 4*(k%8);
        total_err += best_err;
    }

    return total_err;
}
float opt_channel(bc7_enc_state state[], uint32 qblock[2], int qep[2], float block[16], uniform int bits, uniform int epbits)
{
    float ep[2] = {255,0};
    for (uniform int k=0; k<16; k++)
    {
        ep[0] = min(ep[0], block[k]);
        ep[1] = max(ep[1], block[k]);
    }

    channel_quant_dequant(qep, ep, epbits);
    float err = channel_opt_quant(qblock, block, bits, ep);

    // refine
    uniform const int refineIterations = state->refineIterations_channel;
    for (uniform int i=0; i<refineIterations; i++)
    {
        channel_opt_endpoints(ep, block, bits, qblock);
        channel_quant_dequant(qep, ep, epbits);
        err = channel_opt_quant(qblock, block, bits, ep);
    }

    return err;
}

void bc7_enc_mode45_candidate(bc7_enc_state state[], mode45_parameters best_candidate[],
    float best_err[], uniform int mode, uniform int rotation, uniform int swap)
{
    uniform int bits = 2;
    uniform int abits = 2;   if (mode==4) abits = 3;
    uniform int aepbits = 8; if (mode==4) aepbits = 6;
    if (swap==1) { bits = 3; abits = 2; } // (mode 4)

    float block[48];
    for (uniform int k=0; k<16; k++)
    {
        for (uniform int p=0; p<3; p++)
            block[k+p*16] = state->block[k+p*16];

        if (rotation < 3)
        {
            // apply channel rotation
            if (state->channels == 4) block[k+rotation*16] = state->block[k+3*16];
            if (state->channels == 3) block[k+rotation*16] = 255;
        }
    }

    float ep[8];
    block_segment(ep, block, -1, 3);

    int qep[8];
    ep_quant_dequant(qep, ep, mode, 3);

    uint32 qblock[2];
    float err = block_quant(qblock, block, bits, ep, 0, 3);

    // refine
    uniform int refineIterations = state->refineIterations[mode];
    for (uniform int i=0; i<refineIterations; i++)
    {
        opt_endpoints(ep, block, bits, qblock, -1, 3);
        ep_quant_dequant(qep, ep, mode, 3);
        err = block_quant(qblock, block, bits, ep, 0, 3);
    }

    // encoding selected channel
    int aqep[2];
    uint32 aqblock[2];
    err += opt_channel(state, aqblock, aqep, &state->block[rotation*16], abits, aepbits);

    if (err<*best_err)
    {
        swap_ints(best_candidate->qep, qep, 8);
        swap_uints(best_candidate->qblock, qblock, 2);
        swap_ints(best_candidate->aqep, aqep, 2);
        swap_uints(best_candidate->aqblock, aqblock, 2);
        best_candidate->rotation = rotation;
        best_candidate->swap = swap;
        *best_err = err;
    }
}
void bc7_enc_mode45(bc7_enc_state state[])
{
    mode45_parameters best_candidate;
    float best_err = state->best_err;
    memset(&best_candidate, 0, sizeof(mode45_parameters));

    uniform int channel0 = state->mode45_channel0;
    for (uniform int p=channel0; p<state->channels; p++)
    {
        bc7_enc_mode45_candidate(state, &best_candidate, &best_err, 4, p, 0);
        bc7_enc_mode45_candidate(state, &best_candidate, &best_err, 4, p, 1);
    }

    // mode 4
    if (best_err<state->best_err)
    {
        state->best_err = best_err;
        bc7_code_mode45(state->best_data, &best_candidate, 4);
    }

    for (uniform int p=channel0; p<state->channels; p++)
    {
        bc7_enc_mode45_candidate(state, &best_candidate, &best_err, 5, p, 0);
    }

    // mode 5
    if (best_err<state->best_err)
    {
        state->best_err = best_err;
        bc7_code_mode45(state->best_data, &best_candidate, 5);
    }
}

void bc7_enc_mode6(bc7_enc_state state[])
{
    uniform int mode = 6;
    uniform int bits = 4;

    float ep[8];
    block_segment(ep, state->block, -1, state->channels);

    if (state->channels == 3)
    {
        ep[3] = ep[7] = 255;
    }

    int qep[8];
    ep_quant_dequant(qep, ep, mode, state->channels);

    uint32 qblock[2];
    float err = block_quant(qblock, state->block, bits, ep, 0, state->channels);

    // refine
    uniform int refineIterations = state->refineIterations[mode];
    for (uniform int i=0; i<refineIterations; i++)
    {
        opt_endpoints(ep, state->block, bits, qblock, -1, state->channels);
        ep_quant_dequant(qep, ep, mode, state->channels);
        err = block_quant(qblock, state->block, bits, ep, 0, state->channels);
    }

    if (err<state->best_err)
    {
        state->best_err = err;
        bc7_code_mode6(state->best_data, qep, qblock);
    }
}
//////////////////////////
// BC7 bitstream coding

void bc7_code_apply_swap_mode456(int qep[], uniform int channels, uint32 qblock[2], uniform int bits)
{
    uniform int levels = 1 << bits;

    if ((qblock[0]&15)>=levels/2)
    {
        swap_ints(&qep[0], &qep[channels], channels);

        for (uniform int k=0; k<2; k++)
            qblock[k] = (uint32)(0x11111111*(levels-1)) - qblock[k];
    }

    assert((qblock[0]&15) < levels/2);
}
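/*
Illustrative note (not part of the original source): the BC7 bitstream omits
the MSB of each subset's first (anchor) index, so that index must land in the
lower half of its range. If it doesn't, the endpoints are swapped and every
index i is replaced by (levels-1)-i; 0x11111111*(levels-1) builds that
complement constant for all eight 4-bit nibbles at once, e.g. levels=16 gives
0xFFFFFFFF - qblock[k].
*/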
int bc7_code_apply_swap_mode01237(int qep[], uint32 qblock[2], uniform int mode, int part_id)
{
    uniform int bits = 2;  if (mode == 0 || mode == 1) bits = 3;
    uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;

    int flips = 0;
    uniform int levels = 1 << bits;

    int skips[3];
    get_skips(skips, part_id);

    for (uniform int j=0; j<pairs; j++)
    {
        int k0 = skips[j];

        //int q = (qblock[k0/8]>>((k0%8)*4))&15;
        int q = ((gather_uint(qblock, k0>>3)<<(28-(k0&7)*4))>>28);

        if (q>=levels/2)
        {
            swap_ints(&qep[8*j], &qep[8*j+4], 4);
            uint32 pmask = get_pattern_mask(part_id, j);
            flips |= pmask;
        }
    }

    return flips;
}
void put_bits(uint32 data[5], uniform int* uniform pos, uniform int bits, int v)
{
    assert(v<pow2(bits));

    data[*pos/32] |= ((uint32)v) << (*pos%32);
    if (*pos%32+bits>32)
    {
        data[*pos/32+1] |= shift_right(v, 32-*pos%32);
    }
    *pos += bits;
}
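/*
Illustrative example (not part of the original source): put_bits appends v
LSB-first into the 160-bit scratch buffer and handles values straddling a
32-bit word. With *pos==30 and bits==5, v=0b10110 puts its low two bits into
word 0 at positions 30..31 and its high three bits into word 1 at positions
0..2, then advances *pos to 35.
*/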
inline void data_shl_1bit_from(uint32 data[5], int from)
{
    if (from < 96)
    {
        assert(from > 64+10);

        uint32 shifted = (data[2]>>1) | (data[3]<<31);
        uint32 mask = (pow2(from-64)-1)>>1;
        data[2] = (mask&data[2]) | (~mask&shifted);
        data[3] = (data[3]>>1) | (data[4]<<31);
        data[4] = data[4]>>1;
    }
    else if (from < 128)
    {
        uint32 shifted = (data[3]>>1) | (data[4]<<31);
        uint32 mask = (pow2(from-96)-1)>>1;
        data[3] = (mask&data[3]) | (~mask&shifted);
        data[4] = data[4]>>1;
    }
}
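/*
Illustrative note (not part of the original source): this drops the bit just
below position `from` and shifts everything at `from` and above down by one,
leaving lower bits untouched. The index stream is first written with
full-width indices; bc7_code_adjust_skip_mode01237 then squeezes out the
implied zero MSB of each extra anchor index, which is why best_data carries a
fifth uint32 of margin.
*/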
void bc7_code_qblock(uint32 data[5], uniform int* uniform pPos, uint32 qblock[2], uniform int bits, int flips)
{
    uniform int levels = 1 << bits;
    int flips_shifted = flips;
    for (uniform int k1=0; k1<2; k1++)
    {
        uint32 qbits_shifted = qblock[k1];
        for (uniform int k2=0; k2<8; k2++)
        {
            int q = qbits_shifted&15;
            if ((flips_shifted&1)>0) q = (levels-1)-q;

            if (k1==0 && k2==0) put_bits(data, pPos, bits-1, q);
            else                put_bits(data, pPos, bits,   q);

            qbits_shifted >>= 4;
            flips_shifted >>= 1;
        }
    }
}

void bc7_code_adjust_skip_mode01237(uint32 data[5], uniform int mode, int part_id)
{
    uniform int bits = 2;  if (mode == 0 || mode == 1) bits = 3;
    uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;

    int skips[3];
    get_skips(skips, part_id);

    if (pairs>2 && skips[1] < skips[2])
    {
        int t = skips[1]; skips[1] = skips[2]; skips[2] = t;
    }

    for (uniform int j=1; j<pairs; j++)
    {
        int k = skips[j];
        data_shl_1bit_from(data, 128+(pairs-1)-(15-k)*bits);
    }
}
void bc7_code_mode01237(uint32 data[5], int qep[], uint32 qblock[2], int part_id, uniform int mode)
{
    uniform int bits = 2;     if (mode == 0 || mode == 1) bits = 3;
    uniform int pairs = 2;    if (mode == 0 || mode == 2) pairs = 3;
    uniform int channels = 3; if (mode == 7) channels = 4;

    int flips = bc7_code_apply_swap_mode01237(qep, qblock, mode, part_id);

    for (uniform int k=0; k<5; k++) data[k] = 0;
    uniform int pos = 0;

    // mode 0-3, 7
    put_bits(data, &pos, mode+1, 1<<mode);

    // partition
    if (mode==0)
    {
        put_bits(data, &pos, 4, part_id&15);
    }
    else
    {
        put_bits(data, &pos, 6, part_id&63);
    }

    // endpoints
    for (uniform int p=0; p<channels; p++)
    for (uniform int j=0; j<pairs*2; j++)
    {
        if (mode == 0)
        {
            put_bits(data, &pos, 4, qep[j*4+0+p]>>1);
        }
        else if (mode == 1)
        {
            put_bits(data, &pos, 6, qep[j*4+0+p]>>1);
        }
        else if (mode == 2)
        {
            put_bits(data, &pos, 5, qep[j*4+0+p]);
        }
        else if (mode == 3)
        {
            put_bits(data, &pos, 7, qep[j*4+0+p]>>1);
        }
        else if (mode == 7)
        {
            put_bits(data, &pos, 5, qep[j*4+0+p]>>1);
        }
        else
        {
            assert(false);
        }
    }

    // p bits
    if (mode == 1)
    for (uniform int j=0; j<2; j++)
    {
        put_bits(data, &pos, 1, qep[j*8]&1);
    }

    if (mode == 0 || mode == 3 || mode == 7)
    for (uniform int j=0; j<pairs*2; j++)
    {
        put_bits(data, &pos, 1, qep[j*4]&1);
    }

    // quantized values
    bc7_code_qblock(data, &pos, qblock, bits, flips);
    bc7_code_adjust_skip_mode01237(data, mode, part_id);
}
void bc7_code_mode45(uint32 data[5], varying mode45_parameters* uniform params, uniform int mode)
{
    int qep[8];
    uint32 qblock[2];
    int aqep[2];
    uint32 aqblock[2];

    swap_ints(params->qep, qep, 8);
    swap_uints(params->qblock, qblock, 2);
    swap_ints(params->aqep, aqep, 2);
    swap_uints(params->aqblock, aqblock, 2);
    int rotation = params->rotation;
    int swap = params->swap;

    uniform int bits = 2;
    uniform int abits = 2;   if (mode==4) abits = 3;
    uniform int epbits = 7;  if (mode==4) epbits = 5;
    uniform int aepbits = 8; if (mode==4) aepbits = 6;

    if (!swap)
    {
        bc7_code_apply_swap_mode456(qep, 4, qblock, bits);
        bc7_code_apply_swap_mode456(aqep, 1, aqblock, abits);
    }
    else
    {
        swap_uints(qblock, aqblock, 2);
        bc7_code_apply_swap_mode456(aqep, 1, qblock, bits);
        bc7_code_apply_swap_mode456(qep, 4, aqblock, abits);
    }

    for (uniform int k=0; k<5; k++) data[k] = 0;
    uniform int pos = 0;

    // mode 4-5
    put_bits(data, &pos, mode+1, 1<<mode);

    // rotation
    //put_bits(data, &pos, 2, (rotation+1)%4);
    put_bits(data, &pos, 2, (rotation+1)&3);

    if (mode==4)
    {
        put_bits(data, &pos, 1, swap);
    }

    // endpoints
    for (uniform int p=0; p<3; p++)
    {
        put_bits(data, &pos, epbits, qep[0+p]);
        put_bits(data, &pos, epbits, qep[4+p]);
    }

    // alpha endpoints
    put_bits(data, &pos, aepbits, aqep[0]);
    put_bits(data, &pos, aepbits, aqep[1]);

    // quantized values
    bc7_code_qblock(data, &pos, qblock, bits, 0);
    bc7_code_qblock(data, &pos, aqblock, abits, 0);
}

void bc7_code_mode6(uint32 data[5], int qep[8], uint32 qblock[2])
{
    bc7_code_apply_swap_mode456(qep, 4, qblock, 4);

    for (uniform int k=0; k<5; k++) data[k] = 0;
    uniform int pos = 0;

    // mode 6
    put_bits(data, &pos, 7, 64);

    // endpoints
    for (uniform int p=0; p<4; p++)
    {
        put_bits(data, &pos, 7, qep[0+p]>>1);
        put_bits(data, &pos, 7, qep[4+p]>>1);
    }

    // p bits
    put_bits(data, &pos, 1, qep[0]&1);
    put_bits(data, &pos, 1, qep[4]&1);

    // quantized values
    bc7_code_qblock(data, &pos, qblock, 4, 0);
}
//////////////////////////
// BC7 core

inline void CompressBlockBC7_core(bc7_enc_state state[])
{
    if (state->mode_selection[0]) bc7_enc_mode02(state);
    if (state->mode_selection[1]) bc7_enc_mode13(state);
    if (state->mode_selection[1]) bc7_enc_mode7(state);
    if (state->mode_selection[2]) bc7_enc_mode45(state);
    if (state->mode_selection[3]) bc7_enc_mode6(state);
}
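/*
Illustrative note (not part of the original source): mode_selection groups
the eight BC7 modes into four switches: [0] covers modes 0/2, [1] covers
modes 1/3/7 (hence the two tests above sharing the same flag), [2] covers
modes 4/5 and [3] covers mode 6, matching the mode02/mode137/mode45/mode6
grouping in bc7_enc_copy_settings below.
*/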
void bc7_enc_copy_settings(bc7_enc_state state[], uniform bc7_enc_settings settings[])
{
    state->channels = settings->channels;

    // mode02
    state->mode_selection[0] = settings->mode_selection[0];
    state->skip_mode2 = settings->skip_mode2;
    state->refineIterations[0] = settings->refineIterations[0];
    state->refineIterations[2] = settings->refineIterations[2];

    // mode137
    state->mode_selection[1] = settings->mode_selection[1];
    state->fastSkipTreshold_mode1 = settings->fastSkipTreshold_mode1;
    state->fastSkipTreshold_mode3 = settings->fastSkipTreshold_mode3;
    state->fastSkipTreshold_mode7 = settings->fastSkipTreshold_mode7;
    state->refineIterations[1] = settings->refineIterations[1];
    state->refineIterations[3] = settings->refineIterations[3];
    state->refineIterations[7] = settings->refineIterations[7];

    // mode45
    state->mode_selection[2] = settings->mode_selection[2];
    state->mode45_channel0 = settings->mode45_channel0;
    state->refineIterations_channel = settings->refineIterations_channel;
    state->refineIterations[4] = settings->refineIterations[4];
    state->refineIterations[5] = settings->refineIterations[5];

    // mode6
    state->mode_selection[3] = settings->mode_selection[3];
    state->refineIterations[6] = settings->refineIterations[6];
}
inline void CompressBlockBC7(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[],
    uniform bc7_enc_settings settings[])
{
    bc7_enc_state _state;
    varying bc7_enc_state* uniform state = &_state;

    bc7_enc_copy_settings(state, settings);
    load_block_interleaved_rgba(state->block, src, xx, yy);

    state->best_err = FLT_MAX;
    state->opaque_err = compute_opaque_err(state->block, state->channels);

    CompressBlockBC7_core(state);

    store_data(dst, src->width, xx, yy, state->best_data, 4);
}
export void CompressBlocksBC7_ispc(uniform rgba_surface src[], uniform uint8 dst[], uniform bc7_enc_settings settings[])
{
    const uniform int h_4=src->height/4, w_4=src->width/4; // src is uniform, so keep the loop bounds uniform too
    for (uniform int yy = 0; yy<h_4; yy++)
    for (uniform int xx = 0; xx<w_4; xx++) // foreach (xx = 0 ... src->width/4)
    {
        CompressBlockBC7(src, xx, yy, dst, settings);
    }
}
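/*
Illustrative usage sketch (not part of the original source; the field names
follow the usual ispc_texcomp rgba_surface layout of ptr/width/height/stride,
which is an assumption here):

    // C++ side: compress a 4x4-aligned RGBA8 image into 16-byte BC7 blocks
    rgba_surface src;
    src.ptr    = pixels;     // uint8_t*, interleaved RGBA
    src.width  = w;
    src.height = h;
    src.stride = w * 4;
    std::vector<uint8_t> dst((w / 4) * (h / 4) * 16);
    CompressBlocksBC7_ispc(&src, dst.data(), &settings);
*/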
///////////////////////////////////////////////////////////
// BC6H encoding

/*struct bc6h_enc_settings
{
    bool slow_mode;
    bool fast_mode;
    int refineIterations_1p;
    int refineIterations_2p;
    int fastSkipTreshold;
};*/

struct bc6h_enc_state
{
    float block[64];
    float best_err;
    uint32 best_data[5]; // 4, +1 margin for skips
    float rgb_bounds[6];
    float max_span;
    int max_span_idx;
    int mode;
    int epb;
    int qbounds[8];

    // settings
    uniform bool slow_mode;
    uniform bool fast_mode;
    uniform int refineIterations_1p;
    uniform int refineIterations_2p;
    uniform int fastSkipTreshold;
};

void bc6h_code_2p(uint32 data[5], int pqep[], uint32 qblock[2], int part_id, int mode);
void bc6h_code_1p(uint32 data[5], int qep[8], uint32 qblock[2], int mode);

///////////////////////////
// BC6H format data

static uniform const int mode_prefix_table[] =
{
    0, 1, 2, 6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15
};
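/*
Illustrative note (not part of the original source): BC6H modes 0 and 1 have
2-bit prefixes (00, 01); all other modes use 5-bit prefixes whose low two
bits are 10 or 11, hence the entries stepping by 4 from 2 (00010b) and from 3
(00011b). bc6h_code_1p/2p always emit packed[0] as 5 bits; for modes 0/1 the
three spare positions above the short prefix carry endpoint delta bits.
*/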
inline uniform int get_mode_prefix(uniform int mode)
{
    return mode_prefix_table[mode];
}

static uniform const float span_table[] =
{
    0.9  * 0xFFFF / 64,  // (0) 4 / 10
    0.9  * 0xFFFF / 4,   // (1) 5 / 7
    0.8  * 0xFFFF / 256, // (2) 3 / 11
    -1, -1,
    0.9  * 0xFFFF / 32,  // (5) 4 / 9
    0.9  * 0xFFFF / 16,  // (6) 4 / 8
    -1, -1,
    0xFFFF,              // (9) absolute
    0xFFFF,              // (10) absolute
    0.95 * 0xFFFF / 8,   // (11) 8 / 11
    0.95 * 0xFFFF / 32,  // (12) 7 / 12
    6,                   // (13) 3 / 16
};
inline uniform float get_span(uniform int mode)
{
    uniform float span = span_table[mode]; // float, not int: the table holds fractional spans that an int would truncate
    assert(span > 0);
    return span;
}
static uniform const int mode_bits_table[] =
{
    10, 7, 11, -1, -1,
    9, 8, -1, -1, 6,
    10, 11, 12, 16,
};

inline uniform int get_mode_bits(uniform int mode)
{
    uniform int mode_bits = mode_bits_table[mode];
    assert(mode_bits > 0);
    return mode_bits;
}

///////////////////////////
// endpoint quantization
inline int unpack_to_uf16(uint32 v, int bits)
{
    if (bits >= 15) return v;
    if (v == 0) return 0;
    if (v == (1<<bits)-1) return 0xFFFF;

    return (v * 2 + 1) << (15-bits);
}
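/*
Illustrative example (not part of the original source): this mirrors BC6H
unsigned dequantization - 0 and the top code snap to the range ends, and
everything else maps to the centre of its quantization bucket. With bits=5,
v=1 gives (1*2+1)<<10 = 3072 and v=30 gives 61<<10 = 62464, while v=31
returns 0xFFFF.
*/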
void ep_quant_bc6h(int qep[], float ep[], int bits, uniform int pairs)
{
    int levels = 1 << bits;
    for (uniform int i = 0; i < 8 * pairs; i++)
    {
        int v = ((int)(ep[i] / (256 * 256.f - 1) * (levels - 1) + 0.5f));
        qep[i] = clamp(v, 0, levels - 1);
    }
}

void ep_dequant_bc6h(float ep[], int qep[], int bits, uniform int pairs)
{
    for (uniform int i = 0; i < 8 * pairs; i++)
        ep[i] = unpack_to_uf16(qep[i], bits);
}

void ep_quant_dequant_bc6h(bc6h_enc_state state[], int qep[], float ep[], uniform int pairs)
{
    int bits = state->epb;
    ep_quant_bc6h(qep, ep, bits, pairs);

    for (uniform int i = 0; i < 2 * pairs; i++)
    for (uniform int p = 0; p < 3; p++)
    {
        qep[i * 4 + p] = clamp(qep[i * 4 + p], state->qbounds[p], state->qbounds[4 + p]);
    }

    ep_dequant_bc6h(ep, qep, bits, pairs);
}
//////////////////////////
// parameter estimation

float bc6h_enc_2p_part_fast(bc6h_enc_state state[], int qep[16], uint32 qblock[2], int part_id)
{
    uint32 pattern = get_pattern(part_id);
    uniform int bits = 3;
    uniform int pairs = 2;
    uniform int channels = 3;

    float ep[16];
    for (uniform int j = 0; j<pairs; j++)
    {
        int mask = get_pattern_mask(part_id, j);
        block_segment_core(&ep[j * 8], state->block, mask, channels);
    }

    ep_quant_dequant_bc6h(state, qep, ep, 2);

    float total_err = block_quant(qblock, state->block, bits, ep, pattern, channels);
    return total_err;
}

void bc6h_enc_2p_list(bc6h_enc_state state[], int part_list[], uniform int part_count)
{
    if (part_count == 0) return;

    uniform int bits = 3;
    uniform int pairs = 2;
    uniform int channels = 3;

    int best_qep[24];
    uint32 best_qblock[2];
    int best_part_id = -1;
    float best_err = FLT_MAX;

    for (uniform int part = 0; part<part_count; part++)
    {
        int part_id = part_list[part] & 31;

        int qep[24];
        uint32 qblock[2];
        float err = bc6h_enc_2p_part_fast(state, qep, qblock, part_id);

        if (err<best_err)
        {
            for (uniform int i = 0; i<8 * pairs; i++) best_qep[i] = qep[i];
            for (uniform int k = 0; k<2; k++) best_qblock[k] = qblock[k];
            best_part_id = part_id;
            best_err = err;
        }
    }

    // refine
    uniform int refineIterations = state->refineIterations_2p;
    for (uniform int _ = 0; _<refineIterations; _++)
    {
        float ep[24];
        for (uniform int j = 0; j<pairs; j++)
        {
            int mask = get_pattern_mask(best_part_id, j);
            opt_endpoints(&ep[j * 8], state->block, bits, best_qblock, mask, channels);
        }

        int qep[24];
        uint32 qblock[2];
        ep_quant_dequant_bc6h(state, qep, ep, 2);

        uint32 pattern = get_pattern(best_part_id);
        float err = block_quant(qblock, state->block, bits, ep, pattern, channels);

        if (err<best_err)
        {
            for (uniform int i = 0; i<8 * pairs; i++) best_qep[i] = qep[i];
            for (uniform int k = 0; k<2; k++) best_qblock[k] = qblock[k];
            best_err = err;
        }
    }

    if (best_err<state->best_err)
    {
        state->best_err = best_err;
        bc6h_code_2p(state->best_data, best_qep, best_qblock, best_part_id, state->mode);
    }
}
void bc6h_enc_2p(bc6h_enc_state state[])
{
    float full_stats[15];
    compute_stats_masked(full_stats, state->block, -1, 3);

    int part_list[32];
    for (uniform int part = 0; part < 32; part++)
    {
        int mask = get_pattern_mask(part, 0);
        float bound12 = block_pca_bound_split(state->block, mask, full_stats, 3);
        int bound = (int)(bound12);
        part_list[part] = part + bound * 64;
    }

    partial_sort_list(part_list, 32, state->fastSkipTreshold);
    bc6h_enc_2p_list(state, part_list, state->fastSkipTreshold);
}

void bc6h_enc_1p(bc6h_enc_state state[])
{
    float ep[8];
    block_segment_core(ep, state->block, -1, 3);

    int qep[8];
    ep_quant_dequant_bc6h(state, qep, ep, 1);

    uint32 qblock[2];
    float err = block_quant(qblock, state->block, 4, ep, 0, 3);

    // refine
    uniform int refineIterations = state->refineIterations_1p;
    for (uniform int i = 0; i<refineIterations; i++)
    {
        opt_endpoints(ep, state->block, 4, qblock, -1, 3);
        ep_quant_dequant_bc6h(state, qep, ep, 1);
        err = block_quant(qblock, state->block, 4, ep, 0, 3);
    }

    if (err < state->best_err)
    {
        state->best_err = err;
        bc6h_code_1p(state->best_data, qep, qblock, state->mode);
    }
}
inline void compute_qbounds(bc6h_enc_state state[], float rgb_span[3])
{
    float bounds[8];
    for (uniform int p = 0; p < 3; p++)
    {
        float middle = (state->rgb_bounds[p] + state->rgb_bounds[3 + p]) / 2;
        bounds[  p] = middle - rgb_span[p] / 2;
        bounds[4+p] = middle + rgb_span[p] / 2;
    }

    ep_quant_bc6h(state->qbounds, bounds, state->epb, 1);
}

void compute_qbounds(bc6h_enc_state state[], float span)
{
    float rgb_span[3] = { span, span, span };
    compute_qbounds(state, rgb_span);
}

void compute_qbounds2(bc6h_enc_state state[], float span, int max_span_idx)
{
    float rgb_span[3] = { span, span, span };
    for (uniform int p = 0; p < 3; p++)
    {
        rgb_span[p] *= (p == max_span_idx) ? 2 : 1;
    }
    compute_qbounds(state, rgb_span);
}
void bc6h_test_mode(bc6h_enc_state state[], uniform int mode, uniform bool enc, uniform float margin)
{
    uniform int mode_bits = get_mode_bits(mode);
    uniform float span = get_span(mode);
    float max_span = state->max_span;
    int max_span_idx = state->max_span_idx;

    if (max_span * margin > span) return;

    if (mode >= 10)
    {
        state->epb = mode_bits;
        state->mode = mode;
        compute_qbounds(state, span);
        if (enc) bc6h_enc_1p(state);
    }
    else if (mode <= 1 || mode == 5 || mode == 9)
    {
        state->epb = mode_bits;
        state->mode = mode;
        compute_qbounds(state, span);
        if (enc) bc6h_enc_2p(state);
    }
    else
    {
        state->epb = mode_bits;
        state->mode = mode + max_span_idx;
        compute_qbounds2(state, span, max_span_idx);
        if (enc) bc6h_enc_2p(state);
    }
}
//////////////////////////
// BC6H bitstream coding

int bit_at(int v, uniform int pos)
{
    return (v >> pos) & 1;
}

uint32 reverse_bits(uint32 v, uniform int bits)
{
    if (bits == 2)
    {
        return (v >> 1) + (v & 1) * 2;
    }

    if (bits == 6)
    {
        v = (v & 0x5555) * 2 + ((v >> 1) & 0x5555);
        return (v >> 4) + ((v >> 2) & 3) * 4 + (v & 3) * 16;
    }

    return 0; // ESENTHEL CHANGED to silence warning
}
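/*
Illustrative example (not part of the original source): only the two widths
the packer needs are handled. bits=2 swaps the pair (01b -> 10b); bits=6
first swaps the bits within each 2-bit group, then reverses the groups, so
000001b becomes 100000b. Modes 12/13 need this because their extra
r0/g0/b0 bits are stored in reversed order (r0[10:11], r0[10:15]) in the
stream.
*/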
void bc6h_pack(uint32 packed[], int qep[], int mode)
{
    if (mode == 0)
    {
        int pred_qep[16];
        for (uniform int p = 0; p < 3; p++)
        {
            pred_qep[     p] = qep[p];
            pred_qep[ 4 + p] = (qep[ 4 + p] - qep[p]) & 31;
            pred_qep[ 8 + p] = (qep[ 8 + p] - qep[p]) & 31;
            pred_qep[12 + p] = (qep[12 + p] - qep[p]) & 31;
        }

        for (uniform int i = 1; i < 4; i++)
        for (uniform int p = 0; p < 3; p++)
        {
            assert( qep[i * 4 + p] - qep[p] <= 15);
            assert(-16 <= qep[i * 4 + p] - qep[p]);
        }

        /*
        g2[4], b2[4], b3[4],
        r0[9:0],
        g0[9:0],
        b0[9:0],
        r1[4:0], g3[4], g2[3:0],
        g1[4:0], b3[0], g3[3:0],
        b1[4:0], b3[1], b2[3:0],
        r2[4:0], b3[2],
        r3[4:0], b3[3]
        */

        uint32 pqep[10];

        pqep[4] = pred_qep[4] + (pred_qep[ 8 + 1] & 15) * 64;
        pqep[5] = pred_qep[5] + (pred_qep[12 + 1] & 15) * 64;
        pqep[6] = pred_qep[6] + (pred_qep[ 8 + 2] & 15) * 64;

        pqep[4] += bit_at(pred_qep[12 + 1], 4) << 5;
        pqep[5] += bit_at(pred_qep[12 + 2], 0) << 5;
        pqep[6] += bit_at(pred_qep[12 + 2], 1) << 5;

        pqep[8] = pred_qep[ 8] + bit_at(pred_qep[12 + 2], 2) * 32;
        pqep[9] = pred_qep[12] + bit_at(pred_qep[12 + 2], 3) * 32;

        packed[0] = get_mode_prefix(0);
        packed[0] += bit_at(pred_qep[ 8 + 1], 4) << 2;
        packed[0] += bit_at(pred_qep[ 8 + 2], 4) << 3;
        packed[0] += bit_at(pred_qep[12 + 2], 4) << 4;

        packed[1] = (pred_qep[2] << 20) + (pred_qep[1] << 10) + pred_qep[0];
        packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
        packed[3] = (pqep[9] << 6) + pqep[8];
    }
    else if (mode == 1)
    {
        int pred_qep[16];
        for (uniform int p = 0; p < 3; p++)
        {
            pred_qep[     p] = qep[p];
            pred_qep[ 4 + p] = (qep[ 4 + p] - qep[p]) & 63;
            pred_qep[ 8 + p] = (qep[ 8 + p] - qep[p]) & 63;
            pred_qep[12 + p] = (qep[12 + p] - qep[p]) & 63;
        }

        for (uniform int i = 1; i < 4; i++)
        for (uniform int p = 0; p < 3; p++)
        {
            assert( qep[i * 4 + p] - qep[p] <= 31);
            assert(-32 <= qep[i * 4 + p] - qep[p]);
        }

        /*
        g2[5], g3[4], g3[5],
        r0[6:0], b3[0], b3[1], b2[4],
        g0[6:0], b2[5], b3[2], g2[4],
        b0[6:0], b3[3], b3[5], b3[4],
        r1[5:0], g2[3:0],
        g1[5:0], g3[3:0],
        b1[5:0], b2[3:0],
        r2[5:0],
        r3[5:0]
        */

        uint32 pqep[8];

        pqep[0] = pred_qep[0];
        pqep[0] += bit_at(pred_qep[12 + 2], 0) << 7;
        pqep[0] += bit_at(pred_qep[12 + 2], 1) << 8;
        pqep[0] += bit_at(pred_qep[ 8 + 2], 4) << 9;

        pqep[1] = pred_qep[1];
        pqep[1] += bit_at(pred_qep[ 8 + 2], 5) << 7;
        pqep[1] += bit_at(pred_qep[12 + 2], 2) << 8;
        pqep[1] += bit_at(pred_qep[ 8 + 1], 4) << 9;

        pqep[2] = pred_qep[2];
        pqep[2] += bit_at(pred_qep[12 + 2], 3) << 7;
        pqep[2] += bit_at(pred_qep[12 + 2], 5) << 8;
        pqep[2] += bit_at(pred_qep[12 + 2], 4) << 9;

        pqep[4] = pred_qep[4] + (pred_qep[ 8 + 1] & 15) * 64;
        pqep[5] = pred_qep[5] + (pred_qep[12 + 1] & 15) * 64;
        pqep[6] = pred_qep[6] + (pred_qep[ 8 + 2] & 15) * 64;

        packed[0] = get_mode_prefix(1);
        packed[0] += bit_at(pred_qep[ 8 + 1], 5) << 2;
        packed[0] += bit_at(pred_qep[12 + 1], 4) << 3;
        packed[0] += bit_at(pred_qep[12 + 1], 5) << 4;

        packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
        packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
        packed[3] = (pred_qep[12] << 6) + pred_qep[8];
    }
    else if (mode == 2 || mode == 3 || mode == 4)
    {
        /*
        r0[9:0], g0[9:0], b0[9:0],
        r1[3:0], xx[y], xx[y], g2[3:0],
        g1[3:0], xx[y], xx[y], g3[3:0],
        b1[3:0], xx[y], xx[y], b2[3:0],
        r2[3:0], xx[y], xx[y],
        r3[3:0], xx[y], xx[y]
        */

        int dqep[16];
        for (uniform int p = 0; p < 3; p++)
        {
            int mask = 15;
            if (p == mode - 2) mask = 31;

            dqep[p] = qep[p];
            dqep[ 4 + p] = (qep[ 4 + p] - qep[p]) & mask;
            dqep[ 8 + p] = (qep[ 8 + p] - qep[p]) & mask;
            dqep[12 + p] = (qep[12 + p] - qep[p]) & mask;
        }

        for (uniform int i = 1; i < 4; i++)
        for (uniform int p = 0; p < 3; p++)
        {
            int bits = 4;
            if (p == mode - 2) bits = 5;
            assert( qep[i * 4 + p] - qep[p] <= (1<<bits)/2 - 1);
            assert(-(1<<bits)/2 <= qep[i * 4 + p] - qep[p]);
        }

        uint32 pqep[10];

        pqep[0] = dqep[0] & 1023;
        pqep[1] = dqep[1] & 1023;
        pqep[2] = dqep[2] & 1023;

        pqep[4] = dqep[4] + (dqep[ 8 + 1] & 15) * 64;
        pqep[5] = dqep[5] + (dqep[12 + 1] & 15) * 64;
        pqep[6] = dqep[6] + (dqep[ 8 + 2] & 15) * 64;

        pqep[8] = dqep[8];
        pqep[9] = dqep[12];

        if (mode == 2)
        {
            /*
            r0[9:0], g0[9:0], b0[9:0],
            r1[3:0], r1[4], r0[10], g2[3:0],
            g1[3:0], g0[10], b3[0], g3[3:0],
            b1[3:0], b0[10], b3[1], b2[3:0],
            r2[3:0], r2[4], b3[2],
            r3[3:0], r3[4], b3[3]
            */

            packed[0] = get_mode_prefix(2);

            //
            pqep[5] += bit_at(dqep[0 + 1], 10) << 4;
            pqep[6] += bit_at(dqep[0 + 2], 10) << 4;
            //
            //

            pqep[4] += bit_at(dqep[0 + 0], 10) << 5;
            pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
            pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
            pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
            pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
        }

        if (mode == 3)
        {
            /*
            r0[9:0], g0[9:0], b0[9:0],
            r1[3:0], r0[10], g3[4], g2[3:0],
            g1[3:0], g1[4], g0[10], g3[3:0],
            b1[3:0], b0[10], b3[1], b2[3:0],
            r2[3:0], b3[0], b3[2],
            r3[3:0], g2[4], b3[3]
            */

            packed[0] = get_mode_prefix(3);

            pqep[4] += bit_at(dqep[0 + 0], 10) << 4;
            //
            pqep[6] += bit_at(dqep[0 + 2], 10) << 4;
            pqep[8] += bit_at(dqep[12 + 2], 0) << 4;
            pqep[9] += bit_at(dqep[ 8 + 1], 4) << 4;

            pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
            pqep[5] += bit_at(dqep[0 + 1], 10) << 5;
            pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
            pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
            pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
        }

        if (mode == 4)
        {
            /*
            r0[9:0], g0[9:0], b0[9:0],
            r1[3:0], r0[10], b2[4], g2[3:0],
            g1[3:0], g0[10], b3[0], g3[3:0],
            b1[3:0], b1[4], b0[10], b2[3:0],
            r2[3:0], b3[1], b3[2],
            r3[3:0], b3[4], b3[3]
            */

            packed[0] = get_mode_prefix(4);

            pqep[4] += bit_at(dqep[0 + 0], 10) << 4;
            pqep[5] += bit_at(dqep[0 + 1], 10) << 4;
            //
            pqep[8] += bit_at(dqep[12 + 2], 1) << 4;
            pqep[9] += bit_at(dqep[12 + 2], 4) << 4;

            pqep[4] += bit_at(dqep[ 8 + 2], 4) << 5;
            pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
            pqep[6] += bit_at(dqep[0 + 2], 10) << 5;
            pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
            pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
        }

        packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
        packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
        packed[3] = (pqep[9] << 6) + pqep[8];
    }
    else if (mode == 5)
    {
        int dqep[16];
        for (uniform int p = 0; p < 3; p++)
        {
            dqep[p] = qep[p];
            dqep[ 4 + p] = (qep[ 4 + p] - qep[p]) & 31;
            dqep[ 8 + p] = (qep[ 8 + p] - qep[p]) & 31;
            dqep[12 + p] = (qep[12 + p] - qep[p]) & 31;
        }

        for (uniform int i = 1; i < 4; i++)
        for (uniform int p = 0; p < 3; p++)
        {
            assert( qep[i * 4 + p] - qep[p] <= 15);
            assert(-16 <= qep[i * 4 + p] - qep[p]);
        }

        /*
        r0[8:0], b2[4],
        g0[8:0], g2[4],
        b0[8:0], b3[4],
        r1[4:0], g3[4], g2[3:0],
        g1[4:0], b3[0], g3[3:0],
        b1[4:0], b3[1], b2[3:0],
        r2[4:0], b3[2],
        r3[4:0], b3[3]
        */

        uint32 pqep[10];

        pqep[0] = dqep[0];
        pqep[1] = dqep[1];
        pqep[2] = dqep[2];

        pqep[4] = dqep[4] + (dqep[ 8 + 1] & 15) * 64;
        pqep[5] = dqep[5] + (dqep[12 + 1] & 15) * 64;
        pqep[6] = dqep[6] + (dqep[ 8 + 2] & 15) * 64;

        pqep[8] = dqep[8];
        pqep[9] = dqep[12];

        pqep[0] += bit_at(dqep[ 8 + 2], 4) << 9;
        pqep[1] += bit_at(dqep[ 8 + 1], 4) << 9;
        pqep[2] += bit_at(dqep[12 + 2], 4) << 9;

        pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
        pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
        pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
        pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
        pqep[9] += bit_at(dqep[12 + 2], 3) << 5;

        packed[0] = get_mode_prefix(5);
        packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
        packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
        packed[3] = (pqep[9] << 6) + pqep[8];
    }
    else if (mode == 6 || mode == 7 || mode == 8)
    {
        /*
        r0[7:0], xx[y], b2[4],
        g0[7:0], xx[y], g2[4],
        b0[7:0], xx[y], b3[4],
        r1[4:0], xx[y], g2[3:0],
        g1[4:0], xx[y], g3[3:0],
        b1[4:0], xx[y], b2[3:0],
        r2[4:0], xx[y],
        r3[4:0], xx[y]
        */

        int dqep[16];
        for (uniform int p = 0; p < 3; p++)
        {
            int mask = 31;
            if (p == mode - 6) mask = 63;

            dqep[p] = qep[p];
            dqep[ 4 + p] = (qep[ 4 + p] - qep[p]) & mask;
            dqep[ 8 + p] = (qep[ 8 + p] - qep[p]) & mask;
            dqep[12 + p] = (qep[12 + p] - qep[p]) & mask;
        }

        for (uniform int i = 1; i < 4; i++)
        for (uniform int p = 0; p < 3; p++)
        {
            int bits = 5;
            if (p == mode - 6) bits = 6;
            assert( qep[i * 4 + p] - qep[p] <= (1<<bits)/2 - 1);
            assert(-(1<<bits)/2 <= qep[i * 4 + p] - qep[p]);
        }

        uint32 pqep[10];

        pqep[0] = dqep[0];
        pqep[0] += bit_at(dqep[ 8 + 2], 4) << 9;

        pqep[1] = dqep[1];
        pqep[1] += bit_at(dqep[ 8 + 1], 4) << 9;

        pqep[2] = dqep[2];
        pqep[2] += bit_at(dqep[12 + 2], 4) << 9;

        pqep[4] = dqep[4] + (dqep[ 8 + 1] & 15) * 64;
        pqep[5] = dqep[5] + (dqep[12 + 1] & 15) * 64;
        pqep[6] = dqep[6] + (dqep[ 8 + 2] & 15) * 64;

        pqep[8] = dqep[8];
        pqep[9] = dqep[12];

        if (mode == 6)
        {
            /*
            r0[7:0], g3[4], b2[4],
            g0[7:0], b3[2], g2[4],
            b0[7:0], b3[3], b3[4],
            r1[4:0], r1[5], g2[3:0],
            g1[4:0], b3[0], g3[3:0],
            b1[4:0], b3[1], b2[3:0],
            r2[5:0],
            r3[5:0]
            */

            packed[0] = get_mode_prefix(6);

            pqep[0] += bit_at(dqep[12 + 1], 4) << 8;
            pqep[1] += bit_at(dqep[12 + 2], 2) << 8;
            pqep[2] += bit_at(dqep[12 + 2], 3) << 8;

            //
            pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
            pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
            //
            //
        }

        if (mode == 7)
        {
            /*
            r0[7:0], b3[0], b2[4],
            g0[7:0], g2[5], g2[4],
            b0[7:0], g3[5], b3[4],
            r1[4:0], g3[4], g2[3:0],
            g1[4:0], g1[5], g3[3:0],
            b1[4:0], b3[1], b2[3:0],
            r2[4:0], b3[2],
            r3[4:0], b3[3]
            */

            packed[0] = get_mode_prefix(7);

            pqep[0] += bit_at(dqep[12 + 2], 0) << 8;
            pqep[1] += bit_at(dqep[ 8 + 1], 5) << 8;
            pqep[2] += bit_at(dqep[12 + 1], 5) << 8;

            pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
            //
            pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
            pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
            pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
        }

        if (mode == 8)
        {
            /*
            r0[7:0], b3[1], b2[4],
            g0[7:0], b2[5], g2[4],
            b0[7:0], b3[5], b3[4],
            r1[4:0], g3[4], g2[3:0],
            g1[4:0], b3[0], g3[3:0],
            b1[4:0], b1[5], b2[3:0],
            r2[4:0], b3[2],
            r3[4:0], b3[3]
            */

            packed[0] = get_mode_prefix(8);

            pqep[0] += bit_at(dqep[12 + 2], 1) << 8;
            pqep[1] += bit_at(dqep[ 8 + 2], 5) << 8;
            pqep[2] += bit_at(dqep[12 + 2], 5) << 8;

            pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
            pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
            //
            pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
            pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
        }

        packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
        packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
        packed[3] = (pqep[9] << 6) + pqep[8];
    }
    else if (mode == 9)
    {
        /*
        r0[5:0], g3[4], b3[0], b3[1], b2[4], // 10
        g0[5:0], g2[5], b2[5], b3[2], g2[4], // 10
        b0[5:0], g3[5], b3[3], b3[5], b3[4], // 10
        r1[5:0], g2[3:0], // 10
        g1[5:0], g3[3:0], // 10
        b1[5:0], b2[3:0], // 10
        r2[5:0], // 6
        r3[5:0]  // 6
        */

        uint32 pqep[10];

        pqep[0] = qep[0];
        pqep[0] += bit_at(qep[12 + 1], 4) << 6;
        pqep[0] += bit_at(qep[12 + 2], 0) << 7;
        pqep[0] += bit_at(qep[12 + 2], 1) << 8;
        pqep[0] += bit_at(qep[ 8 + 2], 4) << 9;

        pqep[1] = qep[1];
        pqep[1] += bit_at(qep[ 8 + 1], 5) << 6;
        pqep[1] += bit_at(qep[ 8 + 2], 5) << 7;
        pqep[1] += bit_at(qep[12 + 2], 2) << 8;
        pqep[1] += bit_at(qep[ 8 + 1], 4) << 9;

        pqep[2] = qep[2];
        pqep[2] += bit_at(qep[12 + 1], 5) << 6;
        pqep[2] += bit_at(qep[12 + 2], 3) << 7;
        pqep[2] += bit_at(qep[12 + 2], 5) << 8;
        pqep[2] += bit_at(qep[12 + 2], 4) << 9;

        pqep[4] = qep[4] + (qep[ 8 + 1] & 15) * 64;
        pqep[5] = qep[5] + (qep[12 + 1] & 15) * 64;
        pqep[6] = qep[6] + (qep[ 8 + 2] & 15) * 64;

        packed[0] = get_mode_prefix(9);
        packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
        packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
        packed[3] = (qep[12] << 6) + qep[8];
    }
    else if (mode == 10)
    {
        // the only mode with nothing to do ~
        packed[0] = get_mode_prefix(10);
        packed[1] = (qep[2] << 20) + (qep[1] << 10) + qep[0];
        packed[2] = (qep[6] << 20) + (qep[5] << 10) + qep[4];
    }
    else if (mode == 11)
    {
        int dqep[8];
        for (uniform int p = 0; p < 3; p++)
        {
            dqep[p] = qep[p];
            dqep[4 + p] = (qep[4 + p] - qep[p]) & 511;
        }

        for (uniform int i = 1; i < 2; i++)
        for (uniform int p = 0; p < 3; p++)
        {
            assert( qep[i * 4 + p] - qep[p] <= 255);
            assert(-256 <= qep[i * 4 + p] - qep[p]);
        }

        /*
        r0[9:0], g0[9:0], b0[9:0],
        r1[8:0], r0[10],
        g1[8:0], g0[10],
        b1[8:0], b0[10]
        */

        uint32 pqep[8];

        pqep[0] = dqep[0] & 1023;
        pqep[1] = dqep[1] & 1023;
        pqep[2] = dqep[2] & 1023;

        pqep[4] = dqep[4] + (dqep[0] >> 10) * 512;
        pqep[5] = dqep[5] + (dqep[1] >> 10) * 512;
        pqep[6] = dqep[6] + (dqep[2] >> 10) * 512;

        packed[0] = get_mode_prefix(11);
        packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
        packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
    }
    else if (mode == 12)
    {
        int dqep[8];
        for (uniform int p = 0; p < 3; p++)
        {
            dqep[p] = qep[p];
            dqep[4 + p] = (qep[4 + p] - qep[p]) & 255;
        }

        for (uniform int i = 1; i < 2; i++)
        for (uniform int p = 0; p < 3; p++)
        {
            assert( qep[i * 4 + p] - qep[p] <= 127);
            assert(-128 <= qep[i * 4 + p] - qep[p]);
        }

        /*
        r0[9:0], g0[9:0], b0[9:0],
        r1[7:0], r0[10:11],
        g1[7:0], g0[10:11],
        b1[7:0], b0[10:11]
        */

        uint32 pqep[8];

        pqep[0] = dqep[0] & 1023;
        pqep[1] = dqep[1] & 1023;
        pqep[2] = dqep[2] & 1023;

        pqep[4] = dqep[4] + reverse_bits(dqep[0] >> 10, 2) * 256;
        pqep[5] = dqep[5] + reverse_bits(dqep[1] >> 10, 2) * 256;
        pqep[6] = dqep[6] + reverse_bits(dqep[2] >> 10, 2) * 256;

        packed[0] = get_mode_prefix(12);
        packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
        packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
    }
    else if (mode == 13)
    {
        int dqep[8];
        for (uniform int p = 0; p < 3; p++)
        {
            dqep[p] = qep[p];
            dqep[4 + p] = (qep[4 + p] - qep[p]) & 15;
        }

        for (uniform int i = 1; i < 2; i++)
        for (uniform int p = 0; p < 3; p++)
        {
            assert( qep[i * 4 + p] - qep[p] <= 7);
            assert(-8 <= qep[i * 4 + p] - qep[p]);
        }

        /*
        r0[9:0], g0[9:0], b0[9:0],
        r1[3:0], r0[10:15],
        g1[3:0], g0[10:15],
        b1[3:0], b0[10:15]
        */

        uint32 pqep[8];

        pqep[0] = dqep[0] & 1023;
        pqep[1] = dqep[1] & 1023;
        pqep[2] = dqep[2] & 1023;

        pqep[4] = dqep[4] + reverse_bits(dqep[0] >> 10, 6) * 16;
        pqep[5] = dqep[5] + reverse_bits(dqep[1] >> 10, 6) * 16;
        pqep[6] = dqep[6] + reverse_bits(dqep[2] >> 10, 6) * 16;

        packed[0] = get_mode_prefix(13);
        packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
        packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
    }
    else
    {
        assert(false);
    }
}
void bc6h_code_2p(uint32 data[5], int qep[], uint32 qblock[2], int part_id, int mode)
{
    uniform int bits = 3;
    uniform int pairs = 2;
    uniform int channels = 3;

    int flips = bc7_code_apply_swap_mode01237(qep, qblock, 1, part_id);

    for (uniform int k=0; k<5; k++) data[k] = 0;
    uniform int pos = 0;

    uint32 packed[4];
    bc6h_pack(packed, qep, mode);

    // mode
    put_bits(data, &pos, 5, packed[0]);

    // endpoints
    put_bits(data, &pos, 30, packed[1]);
    put_bits(data, &pos, 30, packed[2]);
    put_bits(data, &pos, 12, packed[3]);

    // partition
    put_bits(data, &pos, 5, part_id);

    // quantized values
    bc7_code_qblock(data, &pos, qblock, bits, flips);
    bc7_code_adjust_skip_mode01237(data, 1, part_id);
}

void bc6h_code_1p(uint32 data[5], int qep[8], uint32 qblock[2], int mode)
{
    bc7_code_apply_swap_mode456(qep, 4, qblock, 4);

    for (uniform int k = 0; k<5; k++) data[k] = 0;
    uniform int pos = 0;

    uint32 packed[4];
    bc6h_pack(packed, qep, mode);

    // mode
    put_bits(data, &pos, 5, packed[0]);

    // endpoints
    put_bits(data, &pos, 30, packed[1]);
    put_bits(data, &pos, 30, packed[2]);

    // quantized values
    bc7_code_qblock(data, &pos, qblock, 4, 0);
}
//////////////////////////
// BC6H core

void bc6h_setup(bc6h_enc_state state[])
{
    for (uniform int p = 0; p < 3; p++)
    {
        state->rgb_bounds[p  ] = 0xFFFF;
        state->rgb_bounds[3+p] = 0;
    }

    // uf16 conversion, min/max
    for (uniform int p = 0; p < 3; p++)
    for (uniform int k = 0; k < 16; k++)
    {
        state->block[p * 16 + k] = (state->block[p * 16 + k] / 31) * 64;
        state->rgb_bounds[p  ] = min(state->rgb_bounds[p  ], state->block[p * 16 + k]);
        state->rgb_bounds[3+p] = max(state->rgb_bounds[3+p], state->block[p * 16 + k]);
    }

    state->max_span = 0;
    state->max_span_idx = 0;

    float rgb_span[] = { 0, 0, 0 };
    for (uniform int p = 0; p < 3; p++)
    {
        rgb_span[p] = state->rgb_bounds[3+p] - state->rgb_bounds[p];
        if (rgb_span[p] > state->max_span)
        {
            state->max_span_idx = p;
            state->max_span = rgb_span[p];
        }
    }
}
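/*
Illustrative note (not part of the original source): the block is loaded as
raw half-float bit patterns, and (x/31)*64 rescales them into the encoder's
internal uf16 range - the largest finite positive half, 0x7BFF, maps to
0x7BFF*64/31, roughly 65534, just under 0xFFFF. The per-channel bounds
gathered here feed the span-based mode rejection in bc6h_test_mode.
*/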
inline void CompressBlockBC6H_core(bc6h_enc_state state[])
{
    bc6h_setup(state);

    if (state->slow_mode)
    {
        bc6h_test_mode(state,  0, true, 0);
        bc6h_test_mode(state,  1, true, 0);
        bc6h_test_mode(state,  2, true, 0);
        bc6h_test_mode(state,  5, true, 0);
        bc6h_test_mode(state,  6, true, 0);
        bc6h_test_mode(state,  9, true, 0);
        bc6h_test_mode(state, 10, true, 0);
        bc6h_test_mode(state, 11, true, 0);
        bc6h_test_mode(state, 12, true, 0);
        bc6h_test_mode(state, 13, true, 0);
    }
    else
    {
        if (state->fastSkipTreshold > 0)
        {
            bc6h_test_mode(state, 9, false, 0);
            if (state->fast_mode) bc6h_test_mode(state, 1, false, 1);
            bc6h_test_mode(state, 6, false, 1 / 1.2);
            bc6h_test_mode(state, 5, false, 1 / 1.2);
            bc6h_test_mode(state, 0, false, 1 / 1.2);
            bc6h_test_mode(state, 2, false, 1);
            bc6h_enc_2p(state);
            if (!state->fast_mode) bc6h_test_mode(state, 1, true, 0);
        }

        bc6h_test_mode(state, 10, false, 0);
        bc6h_test_mode(state, 11, false, 1);
        bc6h_test_mode(state, 12, false, 1);
        bc6h_test_mode(state, 13, false, 1);
        bc6h_enc_1p(state);
    }
}
void bc6h_enc_copy_settings(bc6h_enc_state state[], uniform bc6h_enc_settings settings[])
{
    state->slow_mode = settings->slow_mode;
    state->fast_mode = settings->fast_mode;
    state->fastSkipTreshold = settings->fastSkipTreshold;
    state->refineIterations_1p = settings->refineIterations_1p;
    state->refineIterations_2p = settings->refineIterations_2p;
}

inline void CompressBlockBC6H(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[], uniform bc6h_enc_settings settings[])
{
    bc6h_enc_state _state;
    varying bc6h_enc_state* uniform state = &_state;

    bc6h_enc_copy_settings(state, settings);
    load_block_interleaved_16bit(state->block, src, xx, yy);
    state->best_err = FLT_MAX;

    CompressBlockBC6H_core(state);

    store_data(dst, src->width, xx, yy, state->best_data, 4);
}
export void CompressBlocksBC6H_ispc(uniform rgba_surface src[], uniform uint8 dst[], uniform bc6h_enc_settings settings[])
{
    const uniform int h_4=src->height/4, w_4=src->width/4; // src is uniform, so keep the loop bounds uniform too
    for (uniform int yy = 0; yy<h_4; yy++)
    for (uniform int xx = 0; xx<w_4; xx++) // foreach (xx = 0 ... src->width/4)
    {
        CompressBlockBC6H(src, xx, yy, dst, settings);
    }
}
///////////////////////////////////////////////////////////
// ETC encoding

/*struct etc_enc_settings
{
    int fastSkipTreshold;
};*/

struct etc_enc_state
{
    float block[64];
    int prev_qcenter[3];
    float best_err;
    uint32 best_data[2];
    uniform bool diff;

    // settings
    uniform int fastSkipTreshold;
};

static uniform const int etc_codeword_table[8][4] =
{
    {   -8,  -2,  2,   8 },
    {  -17,  -5,  5,  17 },
    {  -29,  -9,  9,  29 },
    {  -42, -13, 13,  42 },
    {  -60, -18, 18,  60 },
    {  -80, -24, 24,  80 },
    { -106, -33, 33, 106 },
    { -183, -47, 47, 183 },
};
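/*
Illustrative note (not part of the original source): these are the standard
ETC1 intensity modifier tables. Each 4x2 sub-block picks one of the eight
rows, and every pixel adds one of the row's four offsets to the base color
per channel, e.g. row 2 lets a pixel decode to base-29, base-9, base+9 or
base+29.
*/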
inline uniform int get_etc1_dY(uniform int table, uniform int q)
{
    return etc_codeword_table[table][q];
}

uniform int remap_q[] = { 2, 3, 1, 0 };

int get_remap2_q(int x)
{
    x -= 2;
    if (x < 0) x = 1 - x;
    return x;
}

int extend_4to8bits(int value)
{
    return (value << 4) | value;
}

int extend_5to8bits(int value)
{
    return (value << 3) | (value >> 2);
}
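/*
Illustrative example (not part of the original source): the extend_* helpers
are the usual ETC1 bit-replication expanders. extend_4to8bits(0xA) = 0xAA,
and extend_5to8bits(0x15) = (10101b<<3)|(10101b>>2) = 10101101b = 0xAD, so 0
maps to 0 and the top code maps to 255 exactly.
*/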
int quantize_4bits(float value)
{
    return clamp((int)((value / 255.0f) * 15 + 0.5f), 0, 15); // cast before clamp so the integer overload applies
}

int quantize_5bits(float value)
{
    return clamp((int)((value / 255.0f) * 31 + 0.5f), 0, 31);
}
void center_quant_dequant(int qcenter[3], float center[3], uniform bool diff, int prev_qcenter[3])
{
    if (diff)
    {
        for (uniform int p = 0; p < 3; p++)
        {
            qcenter[p] = quantize_5bits(center[p]);

            if (prev_qcenter[0] >= 0)
            {
                if (qcenter[p] - prev_qcenter[p] >  3) qcenter[p] = prev_qcenter[p] + 3;
                if (qcenter[p] - prev_qcenter[p] < -4) qcenter[p] = prev_qcenter[p] - 4;
            }

            center[p] = extend_5to8bits(qcenter[p]);
        }
    }
    else
    {
        for (uniform int p = 0; p < 3; p++)
        {
            qcenter[p] = quantize_4bits(center[p]);
            center[p] = extend_4to8bits(qcenter[p]);
        }
    }
}
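/*
Illustrative note (not part of the original source): in ETC1 differential
mode the second sub-block's 5-bit base color is stored as a 3-bit
two's-complement delta from the first, so the quantized center is clamped to
prev-4..prev+3 here; prev_qcenter[0] is negative while the first sub-block is
being encoded, which disables the clamp. Individual mode instead uses two
independent 4-bit bases.
*/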
float quantize_pixels_etc1_half(uint32 qblock[1], float block[48], float center[3], uniform int table)
{
    float total_err = 0;
    uint32 bits = 0;

    for (uniform int y = 0; y < 2; y++)
    for (uniform int x = 0; x < 4; x++)
    {
        float best_err = sq(255) * 3;
        int best_q = -1;

        for (uniform int q = 0; q < 4; q++)
        {
            int dY = get_etc1_dY(table, remap_q[q]);

            float err = 0;
            for (int p = 0; p < 3; p++)
                err += sq(block[16 * p + y*4+x] - clamp(center[p] + dY, 0, 255));

            if (err < best_err)
            {
                best_err = err;
                best_q = q;
            }
        }

        assert(best_q >= 0);

        bits |= (best_q &  1) << (x * 4 + y);
        bits |= (best_q >> 1) << (x * 4 + y + 16);
        total_err += best_err;
    }

    qblock[0] = bits;
    return total_err;
}
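/*
Illustrative note (not part of the original source): ETC1 stores pixel
indices as two 16-bit planes - the low bits of all indices in the low half of
the word and the high bits in the high half - which is why best_q is split
across positions (x*4+y) and (x*4+y+16). remap_q maps the emitted 2-bit index
(0 = +small, 1 = +large, 2 = -small, 3 = -large) onto the ascending
-large/-small/+small/+large row layout used by etc_codeword_table above.
*/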
float compress_etc1_half_1(uint32 out_qbits[1], int out_table[1], int out_qcenter[3],
    float half_pixels[], uniform bool diff, int prev_qcenter[3])
{
    float dc[3];
    for (uniform int p = 0; p < 3; p++) dc[p] = 0;

    for (uniform int k = 0; k < 8; k++)
    {
        for (uniform int p = 0; p < 3; p++)
            dc[p] += half_pixels[k + p * 16];
    }

    float best_error = sq(255) * 3 * 8.0f;
    int best_table = -1;
    int best_qcenter[3];
    uint32 best_qbits;

    for (uniform int table_level = 0; table_level < 8; table_level++)
    {
        float center[3];
        int qcenter[3];
        uint32 qbits;

        for (uniform int p = 0; p < 3; p++) center[p] = dc[p] / 8 - get_etc1_dY(table_level, 2);

        center_quant_dequant(qcenter, center, diff, prev_qcenter);
        float err = quantize_pixels_etc1_half(&qbits, half_pixels, center, table_level);

        if (err < best_error)
        {
            best_error = err;
            best_table = table_level;
            best_qbits = qbits;
            for (uniform int p = 0; p < 3; p++) best_qcenter[p] = qcenter[p];
        }
    }

    out_table[0] = best_table;
    out_qbits[0] = best_qbits;
    for (uniform int p = 0; p < 3; p++) out_qcenter[p] = best_qcenter[p];
    return best_error;
}
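
// Least-squares fit of one base-color channel to the per-selector cluster
// statistics (colors[q][3] = pixel count, colors[q][7 + p] = cluster mean).
// The four extra branches refit while ignoring the lowest one/two or highest
// one/two clusters, which can win when clamping to [0; 255] saturates them.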
float optimize_center(float colors[4][10], uniform int p, uniform int table_level)
{
    float best_center = 0;
    for (uniform int q = 0; q < 4; q++)
    {
        best_center += (colors[q][7 + p] - get_etc1_dY(table_level, q)) * colors[q][3];
    }
    best_center /= 8;

    float best_err = 0;
    for (uniform int q = 0; q < 4; q++)
    {
        float dY = get_etc1_dY(table_level, q);
        best_err += sq(clamp(best_center + dY, 0, 255) - colors[q][7 + p]) * colors[q][3];
    }

    for (uniform int branch = 0; branch < 4; branch++)
    {
        float new_center = 0;
        float sum = 0;
        for (uniform int q = 0; q < 4; q++)
        {
            if (branch <= 1 && q <= branch) continue;
            if (branch >= 2 && q >= branch) continue;
            new_center += (colors[q][7 + p] - get_etc1_dY(table_level, q)) * colors[q][3];
            sum += colors[q][3];
        }
        new_center /= sum;

        float err = 0;
        for (uniform int q = 0; q < 4; q++)
        {
            float dY = get_etc1_dY(table_level, q);
            err += sq(clamp(new_center + dY, 0, 255) - colors[q][7 + p]) * colors[q][3];
        }

        if (err < best_err)
        {
            best_err = err;
            best_center = new_center;
        }
    }

    return best_center;
}
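
// Thorough encoder for one subblock: sort the 8 pixels by luma, enumerate all
// 165 monotone partitions into at most 4 selector clusters, rank them by a
// cheap luma-only error bound, then fully refine the fastSkipTreshold best
// candidates in RGB against all 8 tables.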
float compress_etc1_half_7(uint32 out_qbits[1], int out_table[1], int out_qcenter[3],
    float half_pixels[], etc_enc_state state[])
{
    int err_list[165];
    int y_sorted_inv[8];
    float y_sorted[8];
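
    // Sort pixels by luma; y_sorted holds the sorted per-pixel luma averages
    // and y_sorted_inv maps each original pixel index to its luma rank.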
    {
        int y_sorted_idx[8];
        for (uniform int k = 0; k < 8; k++)
        {
            float value = 0;
            for (uniform int p = 0; p < 3; p++)
                value += half_pixels[k + p * 16];
            y_sorted_idx[k] = (((int)value) << 4) + k;
        }

        partial_sort_list(y_sorted_idx, 8, 8);

        for (uniform int k = 0; k < 8; k++)
            y_sorted_inv[k] = ((y_sorted_idx[k] & 0xF) << 4) + k;
        for (uniform int k = 0; k < 8; k++)
            y_sorted[k] = (y_sorted_idx[k] >> 4) / 3.0f;

        partial_sort_list(y_sorted_inv, 8, 8);
    }
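
    // Enumerate every monotone split of the luma-sorted pixels into clusters
    // q = 0..3 (level1 <= level2 <= level3 are the split points) and score
    // each split with a luma-only SSE minimized over the 8 tables.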
    uniform int idx = -1;
    for (uniform int level1 = 0; level1 <= 8; level1++)
    for (uniform int level2 = level1; level2 <= 8; level2++)
    for (uniform int level3 = level2; level3 <= 8; level3++)
    {
        idx++;
        assert(idx < 165);

        float sum[4];
        float sum_sq[4];
        float count[4];
        float inv_count[4];
        for (uniform int q = 0; q < 4; q++)
        {
            sum[q] = 0;
            sum_sq[q] = 0;
            count[q] = 0;
            inv_count[q] = 0;
        }

        for (uniform int k = 0; k < 8; k++)
        {
            uniform int q = 0;
            if (k >= level1) q = 1;
            if (k >= level2) q = 2;
            if (k >= level3) q = 3;

            sum[q] += y_sorted[k];
            sum_sq[q] += sq(y_sorted[k]);
            count[q] += 1;
        }

        for (uniform int q = 0; q < 4; q++)
        {
            if (count[q] > 0) inv_count[q] = 1 / count[q];
        }

        float base_err = 0;
        for (uniform int q = 0; q < 4; q++) base_err += sum_sq[q] - sq(sum[q]) * inv_count[q];

        float t_err = sq(256) * 8;
        for (uniform int table_level = 0; table_level < 8; table_level++)
        {
            float center = 0;
            for (uniform int q = 0; q < 4; q++) center += sum[q] - get_etc1_dY(table_level, q) * count[q];
            center /= 8;

            float err = base_err;
            for (uniform int q = 0; q < 4; q++)
            {
                err += sq(center + get_etc1_dY(table_level, q) - sum[q] * inv_count[q]) * count[q];
            }

            t_err = min(t_err, err);
        }

        int packed = (level1 * 16 + level2) * 16 + level3;
        err_list[idx] = (((int)t_err) << 12) + packed;
    }
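
    // Keep the fastSkipTreshold lowest-error splits and refine each one in
    // full RGB: gather per-cluster statistics, fit the base color per table,
    // and keep the overall best (table, base color, selector bits).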
    partial_sort_list(err_list, 165, state->fastSkipTreshold);

    float best_error = sq(255) * 3 * 8.0f;
    int best_table = -1;
    int best_qcenter[3];
    uint32 best_qbits;

    for (uniform int i = 0; i < state->fastSkipTreshold; i++)
    {
        int packed = err_list[i] & 0xFFF;
        int level1 = (packed >> 8) & 0xF;
        int level2 = (packed >> 4) & 0xF;
        int level3 = (packed >> 0) & 0xF;

        float colors[4][10];
        for (uniform int p = 0; p < 7; p++)
        for (uniform int q = 0; q < 4; q++) colors[q][p] = 0;

        uint32 qbits = 0;
        for (uniform int kk = 0; kk < 8; kk++)
        {
            int k = y_sorted_inv[kk] & 0xF;

            int qq = 0;
            if (k >= level1) qq = 1;
            if (k >= level2) qq = 2;
            if (k >= level3) qq = 3;

            uniform int xx = kk & 3;
            uniform int yy = kk >> 2;

            int qqq = get_remap2_q(qq);
            qbits |= (qqq & 1) << (yy + xx * 4);
            qbits |= (qqq >> 1) << (16 + yy + xx * 4);

            float qvec[4];
            for (uniform int q = 0; q < 4; q++)
            {
                qvec[q] = (q == qq) ? 1.0f : 0.0f;
                colors[q][3] += qvec[q];
            }

            for (uniform int p = 0; p < 3; p++)
            {
                float value = half_pixels[16 * p + kk];
                for (uniform int q = 0; q < 4; q++)
                {
                    colors[q][p] += value * qvec[q];
                    colors[q][4 + p] += sq(value) * qvec[q];
                }
            }
        }

        float base_err = 0;
        for (uniform int q = 0; q < 4; q++)
        {
            if (colors[q][3] > 0)
            for (uniform int p = 0; p < 3; p++)
            {
                colors[q][7 + p] = colors[q][p] / colors[q][3];
                base_err += colors[q][4 + p] - sq(colors[q][7 + p]) * colors[q][3];
            }
        }

        for (uniform int table_level = 0; table_level < 8; table_level++)
        {
            float center[3];
            int qcenter[3];
            for (uniform int p = 0; p < 3; p++)
            {
                center[p] = optimize_center(colors, p, table_level);
            }

            center_quant_dequant(qcenter, center, state->diff, state->prev_qcenter);

            float err = base_err;
            for (uniform int q = 0; q < 4; q++)
            {
                int dY = get_etc1_dY(table_level, q);
                for (uniform int p = 0; p < 3; p++)
                    err += sq(clamp(center[p] + dY, 0, 255) - colors[q][7 + p]) * colors[q][3];
            }

            if (err < best_error)
            {
                best_error = err;
                best_table = table_level;
                best_qbits = qbits;
                for (uniform int p = 0; p < 3; p++) best_qcenter[p] = qcenter[p];
            }
        }
    }

    out_table[0] = best_table;
    out_qbits[0] = best_qbits;
    for (uniform int p = 0; p < 3; p++) out_qcenter[p] = best_qcenter[p];
    return best_error;
}
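
// Compress one subblock and record its quantized base color so the second
// subblock can honor the differential-mode delta constraint.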
float compress_etc1_half(uint32 qbits[1], int table[1], int qcenter[3], float half_pixels[], etc_enc_state state[])
{
    float err = compress_etc1_half_7(qbits, table, qcenter, half_pixels, state);

    for (uniform int p = 0; p < 3; p++)
        state->prev_qcenter[p] = qcenter[p];

    return err;
}
//////////////////////////
// ETC1 core

inline uint32 bswap32(uint32 v)
{
    uint32 r = 0;
    r += ((v >> 24) & 255) << 0;
    r += ((v >> 16) & 255) << 8;
    r += ((v >> 8) & 255) << 16;
    r += ((v >> 0) & 255) << 24;
    return r;
}
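
// Assemble the 64-bit ETC1 block: base colors, table codes, and mode bits in
// the first 32-bit word, selector bits (stored big-endian, hence the bswap)
// in the second. The encoder always worked on 4x2 pixel rows, so for
// flip == 0 the selector bits are transposed back into column-major order.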
void etc_pack(uint32 data[], uint32 qbits[2], int tables[2], int qcenters[2][3], uniform int diff, uniform int flip)
{
    for (uniform int k = 0; k < 2; k++) data[k] = 0;
    uniform int pos = 0;

    if (diff == 0)
    {
        put_bits(data, &pos, 4, qcenters[1][0]);
        put_bits(data, &pos, 4, qcenters[0][0]);
        put_bits(data, &pos, 4, qcenters[1][1]);
        put_bits(data, &pos, 4, qcenters[0][1]);
        put_bits(data, &pos, 4, qcenters[1][2]);
        put_bits(data, &pos, 4, qcenters[0][2]);
    }
    else
    {
        put_bits(data, &pos, 3, (qcenters[1][0] - qcenters[0][0]) & 7);
        put_bits(data, &pos, 5, qcenters[0][0]);
        put_bits(data, &pos, 3, (qcenters[1][1] - qcenters[0][1]) & 7);
        put_bits(data, &pos, 5, qcenters[0][1]);
        put_bits(data, &pos, 3, (qcenters[1][2] - qcenters[0][2]) & 7);
        put_bits(data, &pos, 5, qcenters[0][2]);
    }

    put_bits(data, &pos, 1, flip);
    put_bits(data, &pos, 1, diff);
    put_bits(data, &pos, 3, tables[1]);
    put_bits(data, &pos, 3, tables[0]);

    uint32 all_qbits_flipped = (qbits[1] << 2) | qbits[0];
    uint32 all_qbits = 0;
    if (flip != 0) all_qbits = all_qbits_flipped;

    if (flip == 0)
    for (uniform int k = 0; k < 2; k++)
    for (uniform int y = 0; y < 4; y++)
    for (uniform int x = 0; x < 4; x++)
    {
        int bit = (all_qbits_flipped >> (k * 16 + x * 4 + y)) & 1;
        all_qbits += bit << (k * 16 + y * 4 + x);
    }

    data[1] = bswap32(all_qbits);
}
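
// Try both subblock orientations and both color modes (differential first,
// then individual), keeping the lowest-error packed block.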
inline void CompressBlockETC1_core(etc_enc_state state[])
{
    float flipped_block[48];
    for (uniform int y = 0; y < 4; y++)
    for (uniform int x = 0; x < 4; x++)
    for (uniform int p = 0; p < 3; p++)
    {
        flipped_block[16 * p + x * 4 + y] = state->block[16 * p + y * 4 + x];
    }

    for (uniform int flip = 0; flip < 2; flip++)
    for (uniform int diff = 1; diff >= 0; diff--)
    {
        state->diff = diff == 1;
        state->prev_qcenter[0] = -1;

        varying float * uniform pixels = state->block;
        if (flip == 0) pixels = flipped_block;

        uint32 qbits[2];
        int tables[2];
        int qcenters[2][3];

        float err = 0;
        err += compress_etc1_half(&qbits[0], &tables[0], qcenters[0], &pixels[0], state);
        err += compress_etc1_half(&qbits[1], &tables[1], qcenters[1], &pixels[8], state);

        if (err < state->best_err)
        {
            state->best_err = err;
            etc_pack(state->best_data, qbits, tables, qcenters, diff, flip);
        }
    }
}
void etc_enc_copy_settings(etc_enc_state state[], uniform etc_enc_settings settings[])
{
    state->fastSkipTreshold = settings->fastSkipTreshold;
}
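
// Compress a single 4x4 block: load interleaved RGB pixels, run the encoder,
// and store the resulting 8-byte ETC1 block into dst.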
inline void CompressBlockETC1(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[], uniform etc_enc_settings settings[])
{
    etc_enc_state _state;
    varying etc_enc_state* uniform state = &_state;

    etc_enc_copy_settings(state, settings);
    load_block_interleaved(state->block, src, xx, yy);

    state->best_err = FLT_MAX;
    CompressBlockETC1_core(state);

    store_data(dst, src->width, xx, yy, state->best_data, 2);
}
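
// Entry point: walk the surface in 4x4 blocks. The retained comment below
// shows the upstream foreach loop these plain uniform loops replaced.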
export void CompressBlocksETC1_ispc(uniform rgba_surface src[], uniform uint8 dst[], uniform etc_enc_settings settings[])
{
    const uniform int h_4 = src->height / 4, w_4 = src->width / 4;
    for (uniform int yy = 0; yy < h_4; yy++)
    for (uniform int xx = 0; xx < w_4; xx++) // foreach (xx = 0 ... src->width/4)
    {
        CompressBlockETC1(src, xx, yy, dst, settings);
    }
}
} // namespace ispc ESENTHEL CHANGED