- /*
- Copyright (c) 2013, Intel Corporation
- All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
- OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- typedef U8 uint8;
- typedef U16 uint16;
- typedef I32 int32;
- typedef U32 uint32;
- typedef U64 uint64;
- #define uniform
- #define varying
- #define export
- namespace ispc
- {
- inline float abs(float x) {return fabsf(x);}
- inline float sqrt(float x) {return sqrtf(x);}
- inline float rsqrt(float x) {return 1.0f/sqrtf(x);}
- inline float rcp (float x) {return 1.0f/ x ;}
- inline float min(float a, float b) {return (a<=b) ? a : b;}
- inline float max(float a, float b) {return (a>=b) ? a : b;}
- inline float clamp(float v, float a, float b)
- {
- return (v<=a) ? a : (v>=b) ? b : v;
- }
- ///////////////////////////
- // generic helpers
- inline void swap_ints(int u[], int v[], uniform int n)
- {
- for (uniform int i=0; i<n; i++)
- {
- int t = u[i];
- u[i] = v[i];
- v[i] = t;
- }
- }
- inline void swap_uints(uint32 u[], uint32 v[], uniform int n)
- {
- for (uniform int i=0; i<n; i++)
- {
- uint32 t = u[i];
- u[i] = v[i];
- v[i] = t;
- }
- }
- inline float sq(float v)
- {
- return v*v;
- }
- inline int pow2(int x)
- {
- return 1<<x;
- }
- inline float clamp(float v, int a, int b)
- {
- return (v<=a) ? a : (v>=b) ? b : v;
- }
- // the following helpers isolate performance warnings
- inline uint32 gather_uint(const uniform uint32* const uniform ptr, int idx)
- {
- return ptr[idx]; // (perf warning expected)
- }
- /*inline uint32 gather_uint(const varying uint32* const uniform ptr, int idx)
- {
- return ptr[idx]; // (perf warning expected)
- }*/
- inline int32 gather_int(const uniform int32* const uniform ptr, int idx)
- {
- return ptr[idx]; // (perf warning expected)
- }
- inline float gather_float(varying float* uniform ptr, int idx)
- {
- return ptr[idx]; // (perf warning expected)
- }
- inline void scatter_uint(uniform uint32* ptr, int idx, uint32 value)
- {
- ptr[idx] = value; // (perf warning expected)
- }
- inline void scatter_int(varying int32* uniform ptr, int idx, uint32 value)
- {
- ptr[idx] = value; // (perf warning expected)
- }
- inline uint32 shift_right(uint32 v, const uniform int bits)
- {
- return v>>bits; // (perf warning expected)
- }
- ///////////////////////////////////////////////////////////
- // BC1/BC7 shared
- /*struct rgba_surface
- {
- uint8* ptr;
- int width, height, stride;
- };*/
- inline void load_block_interleaved(float block[48], uniform rgba_surface* uniform src, int xx, uniform int yy)
- {
- for (uniform int y = 0; y<4; y++)
- for (uniform int x = 0; x<4; x++)
- {
- uniform uint32* uniform src_ptr = (uint32*)&src->ptr[(yy*4+y)*src->stride];
- uint32 rgba = gather_uint(src_ptr, xx*4+x);
- block[16 * 0 + y * 4 + x] = (int)((rgba >> 0) & 255);
- block[16 * 1 + y * 4 + x] = (int)((rgba >> 8) & 255);
- block[16 * 2 + y * 4 + x] = (int)((rgba >> 16) & 255);
- }
- }
- inline void load_block_interleaved_rgba(float block[64], uniform rgba_surface* uniform src, int xx, uniform int yy)
- {
- for (uniform int y=0; y<4; y++)
- for (uniform int x=0; x<4; x++)
- {
- uniform uint32* uniform src_ptr = (uint32*)&src->ptr[(yy*4+y)*src->stride];
- uint32 rgba = gather_uint(src_ptr, xx*4+x);
- block[16*0+y*4+x] = (int)((rgba>> 0)&255);
- block[16*1+y*4+x] = (int)((rgba>> 8)&255);
- block[16*2+y*4+x] = (int)((rgba>>16)&255);
- block[16*3+y*4+x] = (int)((rgba>>24)&255);
- }
- }
- inline void load_block_interleaved_16bit(float block[48], uniform rgba_surface* uniform src, int xx, uniform int yy)
- {
- for (uniform int y = 0; y<4; y++)
- for (uniform int x = 0; x<4; x++)
- {
- uniform uint32* uniform src_ptr_r = (uint32*)&src->ptr[(yy * 4 + y)*src->stride + 0];
- uniform uint32* uniform src_ptr_g = (uint32*)&src->ptr[(yy * 4 + y)*src->stride + 2];
- uniform uint32* uniform src_ptr_b = (uint32*)&src->ptr[(yy * 4 + y)*src->stride + 4];
- uint32 xr = gather_uint(src_ptr_r, (xx * 4 + x) * 2);
- uint32 xg = gather_uint(src_ptr_g, (xx * 4 + x) * 2);
- uint32 xb = gather_uint(src_ptr_b, (xx * 4 + x) * 2);
- block[16 * 0 + y * 4 + x] = (int)(xr & 0xFFFF);
- block[16 * 1 + y * 4 + x] = (int)(xg & 0xFFFF);
- block[16 * 2 + y * 4 + x] = (int)(xb & 0xFFFF);
- block[16 * 3 + y * 4 + x] = 0;
- }
- }
- inline void store_data(uniform uint8 dst[], int width, int xx, uniform int yy, uint32 data[], int data_size)
- {
- for (uniform int k=0; k<data_size; k++)
- {
- uniform uint32* dst_ptr = (uint32*)&dst[(yy)*width*data_size];
- scatter_uint(dst_ptr, xx*data_size+k, data[k]);
- }
- }
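- // symmetric matrix*vector products; covar stores the upper triangle row-major:
- // 6 entries for the 3x3 case (ssymv), 10 entries for the 4x4 case (ssymv4);
- // ssymv3 uses the same 4x4 layout but only touches the RGB part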
- inline void ssymv(float a[3], float covar[6], float b[3])
- {
- a[0] = covar[0]*b[0]+covar[1]*b[1]+covar[2]*b[2];
- a[1] = covar[1]*b[0]+covar[3]*b[1]+covar[4]*b[2];
- a[2] = covar[2]*b[0]+covar[4]*b[1]+covar[5]*b[2];
- }
- inline void ssymv3(float a[4], float covar[10], float b[4])
- {
- a[0] = covar[0]*b[0]+covar[1]*b[1]+covar[2]*b[2];
- a[1] = covar[1]*b[0]+covar[4]*b[1]+covar[5]*b[2];
- a[2] = covar[2]*b[0]+covar[5]*b[1]+covar[7]*b[2];
- }
- inline void ssymv4(float a[4], float covar[10], float b[4])
- {
- a[0] = covar[0]*b[0]+covar[1]*b[1]+covar[2]*b[2]+covar[3]*b[3];
- a[1] = covar[1]*b[0]+covar[4]*b[1]+covar[5]*b[2]+covar[6]*b[3];
- a[2] = covar[2]*b[0]+covar[5]*b[1]+covar[7]*b[2]+covar[8]*b[3];
- a[3] = covar[3]*b[0]+covar[6]*b[1]+covar[8]*b[2]+covar[9]*b[3];
- }
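- // power iteration: estimate the dominant eigenvector of the covariance matrix,
- // used below as the PCA axis for endpoint selection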
- inline void compute_axis3(float axis[3], float covar[6], uniform const int powerIterations)
- {
- float vec[3] = {1,1,1};
- for (uniform int i=0; i<powerIterations; i++)
- {
- ssymv(axis, covar, vec);
- for (uniform int p=0; p<3; p++) vec[p] = axis[p];
- if (i%2==1) // renormalize every other iteration
- {
- float norm_sq = 0;
- for (uniform int p=0; p<3; p++)
- norm_sq += axis[p]*axis[p];
- float rnorm = rsqrt(norm_sq);
- for (uniform int p=0; p<3; p++) vec[p] *= rnorm;
- }
- }
- for (uniform int p=0; p<3; p++) axis[p] = vec[p];
- }
- inline void compute_axis(float axis[4], float covar[10], uniform const int powerIterations, uniform int channels)
- {
- float vec[4] = {1,1,1,1};
- for (uniform int i=0; i<powerIterations; i++)
- {
- if (channels == 3) ssymv3(axis, covar, vec);
- if (channels == 4) ssymv4(axis, covar, vec);
- for (uniform int p=0; p<channels; p++) vec[p] = axis[p];
- if (i%2==1) // renormalize every other iteration
- {
- float norm_sq = 0;
- for (uniform int p=0; p<channels; p++)
- norm_sq += axis[p]*axis[p];
- float rnorm = rsqrt(norm_sq);
- for (uniform int p=0; p<channels; p++) vec[p] *= rnorm;
- }
- }
- for (uniform int p=0; p<channels; p++) axis[p] = vec[p];
- }
- ///////////////////////////////////////////////////////////
- // BC1/BC3 encoding
- inline int stb__Mul8Bit(int a, int b)
- {
- int t = a*b + 128;
- return (t + (t >> 8)) >> 8;
- }
- inline uint16 stb__As16Bit(int r, int g, int b)
- {
- return (stb__Mul8Bit(r,31) << 11) + (stb__Mul8Bit(g,63) << 5) + stb__Mul8Bit(b,31);
- }
- inline uint16 enc_rgb565(float c[3])
- {
- return stb__As16Bit((int)c[0], (int)c[1], (int)c[2]);
- }
- inline void dec_rgb565(float c[3], int p)
- {
- int c2 = (p>>0)&31;
- int c1 = (p>>5)&63;
- int c0 = (p>>11)&31;
- c[0] = (c0<<3)+(c0>>2);
- c[1] = (c1<<2)+(c1>>4);
- c[2] = (c2<<3)+(c2>>2);
- }
- inline void pick_endpoints_dc(int c0[3], int c1[3], int block[48], int iaxis[3])
- {
- for (uniform int p=0; p<3; p++)
- for (uniform int y=0; y<4; y++)
- for (uniform int x=0; x<4; x++)
- {
- c0[p] += block[p*16+y*4+x];
- }
- for (uniform int p=0; p<3; p++)
- c0[p] >>= 4;
- }
- inline void pick_endpoints(float c0[3], float c1[3], float block[48], float axis[3], float dc[3])
- {
- float min_dot = 256*256;
- float max_dot = 0;
- for (uniform int y=0; y<4; y++)
- for (uniform int x=0; x<4; x++)
- {
- float dot = 0;
- for (uniform int p=0; p<3; p++)
- dot += (block[p*16+y*4+x]-dc[p])*axis[p];
-
- min_dot = min(min_dot, dot);
- max_dot = max(max_dot, dot);
- }
- if (max_dot-min_dot < 1.0f)
- {
- min_dot -= 0.5f;
- max_dot += 0.5f;
- }
- float norm_sq = 0;
- for (uniform int p=0; p<3; p++)
- norm_sq += axis[p]*axis[p];
- float rnorm_sq = rcp(norm_sq);
- for (uniform int p=0; p<3; p++)
- {
- c0[p] = clamp(dc[p]+min_dot*rnorm_sq*axis[p], 0, 255);
- c1[p] = clamp(dc[p]+max_dot*rnorm_sq*axis[p], 0, 255);
- }
- }
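- // project each pixel onto the c0->c1 axis and round to one of 4 levels
- // (2 bits per pixel, packed LSB-first); the indices come out in linear order
- // along the axis and are remapped to BC1 index order later by fix_qbits()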
- inline uint32 fast_quant(float block[48], int p0, int p1)
- {
- float c0[3];
- float c1[3];
- dec_rgb565(c0, p0);
- dec_rgb565(c1, p1);
- float dir[3];
- for (uniform int p=0; p<3; p++) dir[p] = c1[p]-c0[p];
-
- float sq_norm = 0;
- for (uniform int p=0; p<3; p++) sq_norm += sq(dir[p]);
- float rsq_norm = rcp(sq_norm);
- for (uniform int p=0; p<3; p++) dir[p] *= rsq_norm*3;
- float bias = 0.5f;
- for (uniform int p=0; p<3; p++) bias -= c0[p]*dir[p];
- uint32 bits = 0;
- uint32 scaler = 1;
- for (uniform int k=0; k<16; k++)
- {
- float dot = 0;
- for (uniform int p=0; p<3; p++)
- dot += block[k+p*16]*dir[p];
- int q = clamp((int)(dot+bias), 0, 3);
- //bits += q<<(k*2);
- bits += q*scaler;
- scaler *= 4;
- }
-
- return bits;
- }
- inline void compute_covar_dc(float covar[6], float dc[3], float block[48])
- {
- for (uniform int i=0; i<6; i++) covar[i] = 0;
- for (uniform int p=0; p<3; p++) dc[p] = 0;
- for (uniform int k=0; k<16; k++)
- {
- for (uniform int p=0; p<3; p++)
- dc[p] += block[k+p*16];
- }
- for (uniform int p=0; p<3; p++) dc[p] /= 16;
-
- for (uniform int k=0; k<16; k++)
- {
- float rgb[3];
- for (uniform int p=0; p<3; p++)
- rgb[p] = block[k+p*16]-dc[p];
-
- covar[0] += rgb[0]*rgb[0];
- covar[1] += rgb[0]*rgb[1];
- covar[2] += rgb[0]*rgb[2];
-
- covar[3] += rgb[1]*rgb[1];
- covar[4] += rgb[1]*rgb[2];
- covar[5] += rgb[2]*rgb[2];
- }
- }
- // ugly, but makes BC1 compression 20% faster overall
- inline void compute_covar_dc_ugly(float covar[6], float dc[3], float block[48])
- {
- for (uniform int p=0; p<3; p++)
- {
- float acc = 0;
- for (uniform int k=0; k<16; k++)
- acc += block[k+p*16];
- dc[p] = acc/16;
- }
-
- float covar0 = 0.0f;
- float covar1 = 0.0f;
- float covar2 = 0.0f;
- float covar3 = 0.0f;
- float covar4 = 0.0f;
- float covar5 = 0.0f;
- for (uniform int k=0; k<16; k++)
- {
- float rgb0, rgb1, rgb2;
- rgb0 = block[k+0*16]-dc[0];
- rgb1 = block[k+1*16]-dc[1];
- rgb2 = block[k+2*16]-dc[2];
-
- covar0 += rgb0*rgb0;
- covar1 += rgb0*rgb1;
- covar2 += rgb0*rgb2;
-
- covar3 += rgb1*rgb1;
- covar4 += rgb1*rgb2;
- covar5 += rgb2*rgb2;
- }
- covar[0] = covar0;
- covar[1] = covar1;
- covar[2] = covar2;
- covar[3] = covar3;
- covar[4] = covar4;
- covar[5] = covar5;
- }
- inline void bc1_refine(int pe[2], float block[48], uint32 bits, float dc[3])
- {
- float c0[3];
- float c1[3];
- if ((bits ^ (bits*4)) < 4)
- {
- // single color
- for (uniform int p=0; p<3; p++)
- {
- c0[p] = dc[p];
- c1[p] = dc[p];
- }
- }
- else
- {
- float Atb1[3] = {0,0,0};
- float sum_q = 0;
- float sum_qq = 0;
- uint32 shifted_bits = bits;
-
- for (uniform int k=0; k<16; k++)
- {
- float q = (int)(shifted_bits&3);
- shifted_bits >>= 2;
- float x = 3-q;
- float y = q;
-
- sum_q += q;
- sum_qq += q*q;
- for (uniform int p=0; p<3; p++) Atb1[p] += x*block[k+p*16];
- }
-
- float sum[3];
- float Atb2[3];
- for (uniform int p=0; p<3; p++)
- {
- sum[p] = dc[p]*16;
- Atb2[p] = 3*sum[p]-Atb1[p];
- }
-
- float Cxx = 16*sq(3)-2*3*sum_q+sum_qq;
- float Cyy = sum_qq;
- float Cxy = 3*sum_q-sum_qq;
- float scale = 3.0f * rcp(Cxx*Cyy - Cxy*Cxy);
- for (uniform int p=0; p<3; p++)
- {
- c0[p] = (Atb1[p]*Cyy - Atb2[p]*Cxy)*scale;
- c1[p] = (Atb2[p]*Cxx - Atb1[p]*Cxy)*scale;
-
- c0[p] = clamp(c0[p], 0, 255);
- c1[p] = clamp(c1[p], 0, 255);
- }
- }
- pe[0] = enc_rgb565(c0);
- pe[1] = enc_rgb565(c1);
- }
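- // remap every 2-bit index from linear order along the axis (0..3 = c0..c1)
- // to BC1 index semantics (0=c0, 1=c1, 2=2/3*c0+1/3*c1, 3=1/3*c0+2/3*c1),
- // i.e. 0->0, 1->2, 2->3, 3->1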
- inline uint32 fix_qbits(uint32 qbits)
- {
- uniform const uint32 mask_01b = 0x55555555;
- uniform const uint32 mask_10b = 0xAAAAAAAA;
- uint32 qbits0 = qbits&mask_01b;
- uint32 qbits1 = qbits&mask_10b;
- qbits = (qbits1>>1) + (qbits1 ^ (qbits0<<1));
- return qbits;
- }
- inline void CompressBlockBC1_core(float block[48], uint32 data[2])
- {
- uniform const int powerIterations = 4;
- uniform const int refineIterations = 1;
-
- float covar[6];
- float dc[3];
- compute_covar_dc_ugly(covar, dc, block);
-
- float eps = 0.001f;
- covar[0] += eps;
- covar[3] += eps;
- covar[5] += eps;
-
- float axis[3];
- compute_axis3(axis, covar, powerIterations);
-
- float c0[3];
- float c1[3];
- pick_endpoints(c0, c1, block, axis, dc);
-
- int p[2];
- p[0] = enc_rgb565(c0);
- p[1] = enc_rgb565(c1);
- if (p[0]<p[1]) swap_ints(&p[0], &p[1], 1);
-
- data[0] = (1<<16)*p[1]+p[0];
- data[1] = fast_quant(block, p[0], p[1]);
-
- // refine
- for (uniform int i=0; i<refineIterations; i++)
- {
- bc1_refine(p, block, data[1], dc);
- if (p[0]<p[1]) swap_ints(&p[0], &p[1], 1);
- data[0] = (1<<16)*p[1]+p[0];
- data[1] = fast_quant(block, p[0], p[1]);
- }
-
- data[1] = fix_qbits(data[1]);
- }
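- // BC3/BC4-style alpha block: 8-level ramp between the min and max alpha,
- // 3 bits per pixel; the index is flipped and remapped because in the 8-value
- // mode index 0 decodes to alpha0 (the max) and index 1 to alpha1 (the min)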
- inline void CompressBlockBC3_alpha(float block[16], uint32 data[2])
- {
- float ep[2] = { 255, 0 };
-
- for (uniform int k=0; k<16; k++)
- {
- ep[0] = min(ep[0], block[k]);
- ep[1] = max(ep[1], block[k]);
- }
-
- if (ep[0] == ep[1]) ep[1] = ep[0]+0.1f;
-
- uint32 qblock[2] = { 0, 0 };
- float scale = 7.0f/(ep[1]-ep[0]);
- for (uniform int k=0; k<16; k++)
- {
- float v = block[k];
- float proj = (v-ep[0])*scale+0.5f;
- int q = clamp((int)proj, 0, 7);
- q = 7-q;
- if (q > 0) q++;
- if (q==8) q = 1;
- qblock[k/8] |= q << ((k%8)*3);
- }
- // (could be improved by refinement)
-
- data[0] = clamp((int)ep[0], 0, 255)*256+clamp((int)ep[1], 0, 255);
- data[0] |= qblock[0]<<16;
- data[1] = qblock[0]>>16;
- data[1] |= qblock[1]<<8;
- }
- inline void CompressBlockBC1(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[])
- {
- float block[48];
- uint32 data[2];
- load_block_interleaved(block, src, xx, yy);
-
- CompressBlockBC1_core(block, data);
- store_data(dst, src->width, xx, yy, data, 2);
- }
- inline void CompressBlockBC3(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[])
- {
- float block[64];
- uint32 data[4];
- load_block_interleaved_rgba(block, src, xx, yy);
-
- CompressBlockBC3_alpha(&block[48], &data[0]);
- CompressBlockBC1_core(block, &data[2]);
- store_data(dst, src->width, xx, yy, data, 4);
- }
- export void CompressBlocksBC1_ispc(uniform rgba_surface src[], uniform uint8 dst[])
- {
- const int h_4=src->height/4, w_4=src->width/4;
- for (uniform int yy = 0; yy<h_4; yy++)
- for (uniform int xx = 0; xx<w_4; xx++) // foreach (xx = 0 ... src->width/4)
- {
- CompressBlockBC1(src, xx, yy, dst);
- }
- }
- export void CompressBlocksBC3_ispc(uniform rgba_surface src[], uniform uint8 dst[])
- {
- const int h_4=src->height/4, w_4=src->width/4;
- for (uniform int yy = 0; yy<h_4; yy++)
- for (uniform int xx = 0; xx<w_4; xx++) // foreach (xx = 0 ... src->width/4)
- {
- CompressBlockBC3(src, xx, yy, dst);
- }
- }
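- /* usage sketch (hypothetical caller, not part of this file) - assumes the
-    rgba_surface layout commented out above: ptr points at 8-bit RGBA pixels,
-    stride is in bytes, and width/height are multiples of 4:
-
-      rgba_surface src;
-      src.ptr    = pixels;                  // 4 bytes per pixel, RGBA order
-      src.width  = 256;
-      src.height = 256;
-      src.stride = src.width*4;
-      uint8 dst[(256/4)*(256/4)*8];         // BC1 writes 8 bytes per 4x4 block
-      CompressBlocksBC1_ispc(&src, dst);    // BC3 would need 16 bytes per block
- */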
- ///////////////////////////////////////////////////////////
- // BC7 encoding
- /*struct bc7_enc_settings
- {
- bool mode_selection[4];
- int refineIterations[8];
- bool skip_mode2;
- int fastSkipTreshold_mode1;
- int fastSkipTreshold_mode3;
- int fastSkipTreshold_mode7;
- int mode45_channel0;
- int refineIterations_channel;
- int channels;
- };*/
- struct bc7_enc_state
- {
- float block[64];
- float opaque_err; // error for coding alpha=255
- float best_err;
- uint32 best_data[5]; // 4, +1 margin for skips
- // settings
- uniform bool mode_selection[4];
- uniform int refineIterations[8];
- uniform bool skip_mode2;
- uniform int fastSkipTreshold_mode1;
- uniform int fastSkipTreshold_mode3;
- uniform int fastSkipTreshold_mode7;
- uniform int mode45_channel0;
- uniform int refineIterations_channel;
- uniform int channels;
- };
- struct mode45_parameters
- {
- int qep[8];
- uint32 qblock[2];
- int aqep[2];
- uint32 aqblock[2];
- int rotation;
- int swap;
- };
- void bc7_code_mode01237(uint32 data[5], int qep[6], uint32 qblock[2], int part_id, uniform int mode);
- void bc7_code_mode45(uint32 data[5], mode45_parameters params[], uniform int mode);
- void bc7_code_mode6(uint32 data[5], int qep[8], uint32 qblock[2]);
- ///////////////////////////
- // BC7 format data
- static const int unquant_table_2bits[] = {0, 21, 43, 64};
- static const int unquant_table_3bits[] = {0, 9, 18, 27, 37, 46, 55, 64};
- static const int unquant_table_4bits[] = {0, 4, 9, 13, 17, 21, 26, 30, 34, 38, 43, 47, 51, 55, 60, 64};
-
- static const int* uniform unquant_tables[] = {unquant_table_2bits, unquant_table_3bits, unquant_table_4bits};
- inline uniform const int* uniform get_unquant_table(uniform int bits)
- {
- assert(bits>=2 && bits<=4); // invalid bit size
- return unquant_tables[bits-2];
- }
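- // pattern_table: for each BC7 partition (64 two-subset entries followed by
- // 64 three-subset entries), the subset index of every pixel packed 2 bits
- // per pixel, pixel 0 in the lowest bits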
- static const uint32 pattern_table[] = {
- 0x50505050u, 0x40404040u, 0x54545454u, 0x54505040u, 0x50404000u, 0x55545450u, 0x55545040u, 0x54504000u,
- 0x50400000u, 0x55555450u, 0x55544000u, 0x54400000u, 0x55555440u, 0x55550000u, 0x55555500u, 0x55000000u,
- 0x55150100u, 0x00004054u, 0x15010000u, 0x00405054u, 0x00004050u, 0x15050100u, 0x05010000u, 0x40505054u,
- 0x00404050u, 0x05010100u, 0x14141414u, 0x05141450u, 0x01155440u, 0x00555500u, 0x15014054u, 0x05414150u,
- 0x44444444u, 0x55005500u, 0x11441144u, 0x05055050u, 0x05500550u, 0x11114444u, 0x41144114u, 0x44111144u,
- 0x15055054u, 0x01055040u, 0x05041050u, 0x05455150u, 0x14414114u, 0x50050550u, 0x41411414u, 0x00141400u,
- 0x00041504u, 0x00105410u, 0x10541000u, 0x04150400u, 0x50410514u, 0x41051450u, 0x05415014u, 0x14054150u,
- 0x41050514u, 0x41505014u, 0x40011554u, 0x54150140u, 0x50505500u, 0x00555050u, 0x15151010u, 0x54540404u,
- 0xAA685050u, 0x6A5A5040u, 0x5A5A4200u, 0x5450A0A8u, 0xA5A50000u, 0xA0A05050u, 0x5555A0A0u, 0x5A5A5050u,
- 0xAA550000u, 0xAA555500u, 0xAAAA5500u, 0x90909090u, 0x94949494u, 0xA4A4A4A4u, 0xA9A59450u, 0x2A0A4250u,
- 0xA5945040u, 0x0A425054u, 0xA5A5A500u, 0x55A0A0A0u, 0xA8A85454u, 0x6A6A4040u, 0xA4A45000u, 0x1A1A0500u,
- 0x0050A4A4u, 0xAAA59090u, 0x14696914u, 0x69691400u, 0xA08585A0u, 0xAA821414u, 0x50A4A450u, 0x6A5A0200u,
- 0xA9A58000u, 0x5090A0A8u, 0xA8A09050u, 0x24242424u, 0x00AA5500u, 0x24924924u, 0x24499224u, 0x50A50A50u,
- 0x500AA550u, 0xAAAA4444u, 0x66660000u, 0xA5A0A5A0u, 0x50A050A0u, 0x69286928u, 0x44AAAA44u, 0x66666600u,
- 0xAA444444u, 0x54A854A8u, 0x95809580u, 0x96969600u, 0xA85454A8u, 0x80959580u, 0xAA141414u, 0x96960000u,
- 0xAAAA1414u, 0xA05050A0u, 0xA0A5A5A0u, 0x96000000u, 0x40804080u, 0xA9A8A9A8u, 0xAAAAAA44u, 0x2A4A5254u
- };
- inline uint32 get_pattern(int part_id)
- {
- return gather_uint(pattern_table, part_id);
- }
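- // pattern_mask_table: per-partition membership masks; low 16 bits = pixels in
- // subset 0, high 16 bits = pixels in subset 1, subset 2 is whatever remains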
- static const uint32 pattern_mask_table[] = {
- 0xCCCC3333u, 0x88887777u, 0xEEEE1111u, 0xECC81337u, 0xC880377Fu, 0xFEEC0113u, 0xFEC80137u, 0xEC80137Fu,
- 0xC80037FFu, 0xFFEC0013u, 0xFE80017Fu, 0xE80017FFu, 0xFFE80017u, 0xFF0000FFu, 0xFFF0000Fu, 0xF0000FFFu,
- 0xF71008EFu, 0x008EFF71u, 0x71008EFFu, 0x08CEF731u, 0x008CFF73u, 0x73108CEFu, 0x3100CEFFu, 0x8CCE7331u,
- 0x088CF773u, 0x3110CEEFu, 0x66669999u, 0x366CC993u, 0x17E8E817u, 0x0FF0F00Fu, 0x718E8E71u, 0x399CC663u,
- 0xAAAA5555u, 0xF0F00F0Fu, 0x5A5AA5A5u, 0x33CCCC33u, 0x3C3CC3C3u, 0x55AAAA55u, 0x96966969u, 0xA55A5AA5u,
- 0x73CE8C31u, 0x13C8EC37u, 0x324CCDB3u, 0x3BDCC423u, 0x69969669u, 0xC33C3CC3u, 0x99666699u, 0x0660F99Fu,
- 0x0272FD8Du, 0x04E4FB1Bu, 0x4E40B1BFu, 0x2720D8DFu, 0xC93636C9u, 0x936C6C93u, 0x39C6C639u, 0x639C9C63u,
- 0x93366CC9u, 0x9CC66339u, 0x817E7E81u, 0xE71818E7u, 0xCCF0330Fu, 0x0FCCF033u, 0x774488BBu, 0xEE2211DDu,
- 0x08CC0133u, 0x8CC80037u, 0xCC80006Fu, 0xEC001331u, 0x330000FFu, 0x00CC3333u, 0xFF000033u, 0xCCCC0033u,
- 0x0F0000FFu, 0x0FF0000Fu, 0x00F0000Fu, 0x44443333u, 0x66661111u, 0x22221111u, 0x136C0013u, 0x008C8C63u,
- 0x36C80137u, 0x08CEC631u, 0x3330000Fu, 0xF0000333u, 0x00EE1111u, 0x88880077u, 0x22C0113Fu, 0x443088CFu,
- 0x0C22F311u, 0x03440033u, 0x69969009u, 0x9960009Fu, 0x03303443u, 0x00660699u, 0xC22C3113u, 0x8C0000EFu,
- 0x1300007Fu, 0xC4003331u, 0x004C1333u, 0x22229999u, 0x00F0F00Fu, 0x24929249u, 0x29429429u, 0xC30C30C3u,
- 0xC03C3C03u, 0x00AA0055u, 0xAA0000FFu, 0x30300303u, 0xC0C03333u, 0x90900909u, 0xA00A5005u, 0xAAA0000Fu,
- 0x0AAA0555u, 0xE0E01111u, 0x70700707u, 0x6660000Fu, 0x0EE01111u, 0x07707007u, 0x06660999u, 0x660000FFu,
- 0x00660099u, 0x0CC03333u, 0x03303003u, 0x60000FFFu, 0x80807777u, 0x10100101u, 0x000A0005u, 0x08CE8421u
- };
- inline int get_pattern_mask(int part_id, int j)
- {
- uint32 mask_packed = gather_uint(pattern_mask_table, part_id);
- int mask0 = mask_packed&0xFFFF;
- int mask1 = mask_packed>>16;
- int mask = (j==2) ? (~mask0)&(~mask1) : ( (j==0) ? mask0 : mask1 );
- return mask;
- }
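- // skip_table: anchor pixels whose index MSB is implicitly zero in the bitstream;
- // the high nibble is the anchor of subset 1, the low nibble the anchor of subset 2
- // (unused for two-subset partitions); subset 0 always anchors at pixel 0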
- static const int skip_table[] = {
- 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u,
- 0xf0u, 0x20u, 0x80u, 0x20u, 0x20u, 0x80u, 0x80u, 0xf0u, 0x20u, 0x80u, 0x20u, 0x20u, 0x80u, 0x80u, 0x20u, 0x20u,
- 0xf0u, 0xf0u, 0x60u, 0x80u, 0x20u, 0x80u, 0xf0u, 0xf0u, 0x20u, 0x80u, 0x20u, 0x20u, 0x20u, 0xf0u, 0xf0u, 0x60u,
- 0x60u, 0x20u, 0x60u, 0x80u, 0xf0u, 0xf0u, 0x20u, 0x20u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0xf0u, 0x20u, 0x20u, 0xf0u,
- 0x3fu, 0x38u, 0xf8u, 0xf3u, 0x8fu, 0x3fu, 0xf3u, 0xf8u, 0x8fu, 0x8fu, 0x6fu, 0x6fu, 0x6fu, 0x5fu, 0x3fu, 0x38u,
- 0x3fu, 0x38u, 0x8fu, 0xf3u, 0x3fu, 0x38u, 0x6fu, 0xa8u, 0x53u, 0x8fu, 0x86u, 0x6au, 0x8fu, 0x5fu, 0xfau, 0xf8u,
- 0x8fu, 0xf3u, 0x3fu, 0x5au, 0x6au, 0xa8u, 0x89u, 0xfau, 0xf6u, 0x3fu, 0xf8u, 0x5fu, 0xf3u, 0xf6u, 0xf6u, 0xf8u,
- 0x3fu, 0xf3u, 0x5fu, 0x5fu, 0x5fu, 0x8fu, 0x5fu, 0xafu, 0x5fu, 0xafu, 0x8fu, 0xdfu, 0xf3u, 0xcfu, 0x3fu, 0x38u
- };
- inline void get_skips(int skips[3], int part_id)
- {
- int skip_packed = gather_int(skip_table, part_id);
- skips[0] = 0;
- skips[1] = skip_packed>>4;
- skips[2] = skip_packed&15;
- }
- ///////////////////////////
- // PCA helpers
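- // stats layout: [0..9] = sums of channel products (upper triangle of the 4x4
- // moment matrix), [10..13] = per-channel sums, [14] = number of masked-in pixels;
- // covar_from_stats() turns these into a covariance matrix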
- inline void compute_stats_masked(float stats[15], float block[64], int mask, uniform int channels)
- {
- for (uniform int i=0; i<15; i++) stats[i] = 0;
- int mask_shifted = mask<<1;
- for (uniform int k=0; k<16; k++)
- {
- mask_shifted >>= 1;
- //if ((mask_shifted&1) == 0) continue;
- int flag = (mask_shifted&1);
- float rgba[4];
- for (uniform int p=0; p<channels; p++) rgba[p] = block[k+p*16];
-
- for (uniform int p=0; p<channels; p++) rgba[p] *= flag;
- stats[14] += flag;
- stats[10] += rgba[0];
- stats[11] += rgba[1];
- stats[12] += rgba[2];
- stats[0] += rgba[0]*rgba[0];
- stats[1] += rgba[0]*rgba[1];
- stats[2] += rgba[0]*rgba[2];
- stats[4] += rgba[1]*rgba[1];
- stats[5] += rgba[1]*rgba[2];
- stats[7] += rgba[2]*rgba[2];
- if (channels==4)
- {
- stats[13] += rgba[3];
- stats[3] += rgba[0]*rgba[3];
- stats[6] += rgba[1]*rgba[3];
- stats[8] += rgba[2]*rgba[3];
- stats[9] += rgba[3]*rgba[3];
- }
- }
- }
- inline void covar_from_stats(float covar[10], float stats[15], uniform int channels)
- {
- covar[0] = stats[0] - stats[10+0]*stats[10+0]/stats[14];
- covar[1] = stats[1] - stats[10+0]*stats[10+1]/stats[14];
- covar[2] = stats[2] - stats[10+0]*stats[10+2]/stats[14];
- covar[4] = stats[4] - stats[10+1]*stats[10+1]/stats[14];
- covar[5] = stats[5] - stats[10+1]*stats[10+2]/stats[14];
- covar[7] = stats[7] - stats[10+2]*stats[10+2]/stats[14];
- if (channels == 4)
- {
- covar[3] = stats[3] - stats[10+0]*stats[10+3]/stats[14];
- covar[6] = stats[6] - stats[10+1]*stats[10+3]/stats[14];
- covar[8] = stats[8] - stats[10+2]*stats[10+3]/stats[14];
- covar[9] = stats[9] - stats[10+3]*stats[10+3]/stats[14];
- }
- }
- inline void compute_covar_dc_masked(float covar[6], float dc[3], float block[64], int mask, uniform int channels)
- {
- float stats[15];
- compute_stats_masked(stats, block, mask, channels);
- covar_from_stats(covar, stats, channels);
- for (uniform int p=0; p<channels; p++) dc[p] = stats[10+p]/stats[14];
- }
- void block_pca_axis(float axis[4], float dc[4], float block[64], int mask, uniform int channels)
- {
- uniform const int powerIterations = 8; // 4 not enough for HQ
- float covar[10];
- compute_covar_dc_masked(covar, dc, block, mask, channels);
- //float var = covar[0] + covar[4] + covar[7] + covar[9] + 256;
- float inv_var = 1.0f / (256 * 256);
- for (uniform int k = 0; k < 10; k++)
- {
- covar[k] *= inv_var;
- }
- float eps = sq(0.001f);
- covar[0] += eps;
- covar[4] += eps;
- covar[7] += eps;
- covar[9] += eps;
- compute_axis(axis, covar, powerIterations, channels);
- }
- void block_segment_core(float ep[], float block[64], int mask, uniform int channels)
- {
- float axis[4];
- float dc[4];
- block_pca_axis(axis, dc, block, mask, channels);
-
- float ext[2];
- ext[0] = +FLT_MAX;
- ext[1] = -FLT_MAX;
- // find min/max
- int mask_shifted = mask<<1;
- for (uniform int k=0; k<16; k++)
- {
- mask_shifted >>= 1;
- if ((mask_shifted&1) == 0) continue;
- float dot = 0;
- for (uniform int p=0; p<channels; p++)
- dot += axis[p]*(block[16*p+k]-dc[p]);
- ext[0] = min(ext[0], dot);
- ext[1] = max(ext[1], dot);
- }
- // create some distance if the endpoints collapse
- if (ext[1]-ext[0] < 1.0f)
- {
- ext[0] -= 0.5f;
- ext[1] += 0.5f;
- }
- for (uniform int i=0; i<2; i++)
- for (uniform int p=0; p<channels; p++)
- {
- ep[4*i+p] = ext[i]*axis[p]+dc[p];
- }
- }
- void block_segment(float ep[], float block[64], int mask, uniform int channels)
- {
- block_segment_core(ep, block, mask, channels);
- for (uniform int i=0; i<2; i++)
- for (uniform int p=0; p<channels; p++)
- {
- ep[4*i+p] = clamp(ep[4*i+p], 0, 255);
- }
- }
- float get_pca_bound(float covar[10], uniform int channels)
- {
- uniform const int powerIterations = 4; // quite approximate, but enough for bounding
- float inv_var = 1.0f / (256 * 256);
- for (uniform int k = 0; k < 10; k++)
- {
- covar[k] *= inv_var;
- }
- float eps = sq(0.001f);
- covar[0] += eps;
- covar[4] += eps;
- covar[7] += eps;
- float axis[4];
- compute_axis(axis, covar, powerIterations, channels);
- float vec[4];
- if (channels == 3) ssymv3(vec, covar, axis);
- if (channels == 4) ssymv4(vec, covar, axis);
- float sq_sum = 0.0f;
- for (uniform int p=0; p<channels; p++) sq_sum += sq(vec[p]);
- float lambda = sqrt(sq_sum);
- float bound = covar[0]+covar[4]+covar[7];
- if (channels == 4) bound += covar[9];
- bound -= lambda;
- bound = max(bound, 0);
- return bound;
- }
- float block_pca_bound(float block[64], int mask, uniform int channels)
- {
- float stats[15];
- compute_stats_masked(stats, block, mask, channels);
- float covar[10];
- covar_from_stats(covar, stats, channels);
- return get_pca_bound(covar, channels);
- }
- float block_pca_bound_split(float block[64], int mask, float full_stats[15], uniform int channels)
- {
- float stats[15];
- compute_stats_masked(stats, block, mask, channels);
-
- float covar1[10];
- covar_from_stats(covar1, stats, channels);
-
- for (uniform int i=0; i<15; i++)
- stats[i] = full_stats[i] - stats[i];
- float covar2[10];
- covar_from_stats(covar2, stats, channels);
- float bound = 0.0f;
- bound += get_pca_bound(covar1, channels);
- bound += get_pca_bound(covar2, channels);
- return sqrt(bound)*256;
- }
- ///////////////////////////
- // endpoint quantization
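- // unpack_to_byte: expand a bits-wide endpoint value to 8 bits by replicating
- // its top bits into the low bits (the usual BCn endpoint expansion)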
- inline int unpack_to_byte(int v, uniform const int bits)
- {
- assert(bits >= 4);
- int vv = v<<(8-bits);
- return vv + shift_right(vv, bits);
- }
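- // modes 0/3/6/7 carry a per-endpoint p-bit: quantize each endpoint twice
- // (p-bit 0 and p-bit 1) and keep whichever candidate reconstructs closer
- // to the unquantized endpoint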
- void ep_quant0367(int qep[], float ep[], uniform int mode, uniform int channels)
- {
- uniform int bits = 7;
- if (mode == 0) bits = 4;
- if (mode == 7) bits = 5;
- uniform int levels = 1 << bits;
- uniform int levels2 = levels*2-1;
-
- for (uniform int i=0; i<2; i++)
- {
- int qep_b[8];
-
- for (uniform int b=0; b<2; b++)
- for (uniform int p=0; p<4; p++)
- {
- int v = (int)((ep[i*4+p]/255.0f*levels2-b)/2+0.5f)*2+b;
- qep_b[b*4+p] = clamp(v, b, levels2-1+b);
- }
- float ep_b[8];
- for (uniform int j=0; j<8; j++)
- ep_b[j] = qep_b[j];
- if (mode==0)
- for (uniform int j=0; j<8; j++)
- ep_b[j] = unpack_to_byte(qep_b[j], 5);
-
- float err0 = 0.0f;
- float err1 = 0.0f;
- for (uniform int p=0; p<channels; p++)
- {
- err0 += sq(ep[i*4+p]-ep_b[0+p]);
- err1 += sq(ep[i*4+p]-ep_b[4+p]);
- }
- if (channels==4 && ep[i*4+3]<=0.5f) err0 = -1; // ESENTHEL CHANGED: BC7 can quantize an endpoint in two ways - #1 standard, #2 adding "0.5*levels" to all channels (one extra bit of precision, but it affects every channel at once). If alpha is 0 yet the RGB channels happen to have less error with the extra 0.5, alpha would get the +0.5 as well and complete transparency could be destroyed, so this always forces variant #1 when alpha is 0.
- for (uniform int p=0; p<4; p++)
- qep[i*4+p] = (err0<err1) ? qep_b[0+p] : qep_b[4+p];
- }
- }
- void ep_quant1(int qep[], float ep[], uniform int mode)
- {
- int qep_b[16];
-
- for (uniform int b=0; b<2; b++)
- for (uniform int i=0; i<8; i++)
- {
- int v = ((int)((ep[i]/255.0f*127.0f-b)/2+0.5f))*2+b;
- qep_b[b*8+i] = clamp(v, b, 126+b);
- }
-
- // dequant
- float ep_b[16];
- for (uniform int k=0; k<16; k++)
- ep_b[k] = unpack_to_byte(qep_b[k], 7);
- float err0 = 0.0f;
- float err1 = 0.0f;
- for (uniform int j = 0; j < 2; j++)
- for (uniform int p = 0; p < 3; p++)
- {
- err0 += sq(ep[j * 4 + p] - ep_b[0 + j * 4 + p]);
- err1 += sq(ep[j * 4 + p] - ep_b[8 + j * 4 + p]);
- }
- for (uniform int i=0; i<8; i++)
- qep[i] = (err0<err1) ? qep_b[0+i] : qep_b[8+i];
- }
- void ep_quant245(int qep[], float ep[], uniform int mode)
- {
- uniform int bits = 5;
- if (mode == 5) bits = 7;
- uniform int levels = 1 << bits;
-
- for (uniform int i=0; i<8; i++)
- {
- int v = ((int)(ep[i]/255.0f*(levels-1)+0.5f));
- qep[i] = clamp(v, 0, levels-1);
- }
- }
- static const int pairs_table[] = {3,2,3,2,1,1,1,2};
- void ep_quant(int qep[], float ep[], uniform int mode, uniform int channels)
- {
- assert(mode <= 7);
- uniform const int pairs = pairs_table[mode];
- if (mode == 0 || mode == 3 || mode == 6 || mode == 7)
- {
- for (uniform int i=0; i<pairs; i++)
- ep_quant0367(&qep[i*8], &ep[i*8], mode, channels);
- }
- else if (mode == 1)
- {
- for (uniform int i=0; i<pairs; i++)
- ep_quant1(&qep[i*8], &ep[i*8], mode);
- }
- else if (mode == 2 || mode == 4 || mode == 5)
- {
- for (uniform int i=0; i<pairs; i++)
- ep_quant245(&qep[i*8], &ep[i*8], mode);
- }
- else
- assert(false);
- }
- void ep_dequant(float ep[], int qep[], uniform int mode)
- {
- assert(mode <= 7);
- uniform const int pairs = pairs_table[mode];
-
- // mode 3, 6 are 8-bit
- if (mode == 3 || mode == 6)
- {
- for (uniform int i=0; i<8*pairs; i++)
- ep[i] = qep[i];
- }
- else if (mode == 1 || mode == 5)
- {
- for (uniform int i=0; i<8*pairs; i++)
- ep[i] = unpack_to_byte(qep[i], 7);
- }
- else if (mode == 0 || mode == 2 || mode == 4)
- {
- for (uniform int i=0; i<8*pairs; i++)
- ep[i] = unpack_to_byte(qep[i], 5);
- }
- else if (mode == 7)
- {
- for (uniform int i=0; i<8*pairs; i++)
- ep[i] = unpack_to_byte(qep[i], 6);
- }
- else
- assert(false);
- }
- void ep_quant_dequant(int qep[], float ep[], uniform int mode, uniform int channels)
- {
- ep_quant(qep, ep, mode, channels);
- ep_dequant(ep, qep, mode);
- }
- ///////////////////////////
- // pixel quantization
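- // block_quant: project each pixel onto its subset's endpoint segment, then try
- // the two nearest index levels, decode each with the BC7 weight table
- // ((64-w)*e0 + w*e1 + 32)/64, and keep the level with the smaller squared error;
- // indices are packed 4 bits per pixel into the two 32-bit words of qblock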
- float block_quant(uint32 qblock[2], float block[64], uniform int bits, float ep[], uint32 pattern, uniform int channels)
- {
- float total_err = 0;
- uniform const int* uniform unquant_table = get_unquant_table(bits);
- int levels = 1 << bits;
- // 64-bit qblock: 5% overhead in this function
- for (uniform int k=0; k<2; k++) qblock[k] = 0;
- int pattern_shifted = pattern;
- for (uniform int k=0; k<16; k++)
- {
- int j = pattern_shifted&3;
- pattern_shifted >>= 2;
- float proj = 0;
- float div = 0;
- for (uniform int p=0; p<channels; p++)
- {
- float ep_a = gather_float(ep, 8*j+0+p);
- float ep_b = gather_float(ep, 8*j+4+p);
- proj += (block[k+p*16]-ep_a)*(ep_b-ep_a);
- div += sq(ep_b-ep_a);
- }
-
- proj /= div;
-
- int q1 = (int)(proj*levels+0.5f);
- q1 = clamp(q1, 1, levels-1);
-
- float err0 = 0;
- float err1 = 0;
- int w0 = gather_int(unquant_table, q1-1);
- int w1 = gather_int(unquant_table, q1);
- for (uniform int p=0; p<channels; p++)
- {
- float ep_a = gather_float(ep, 8*j+0+p);
- float ep_b = gather_float(ep, 8*j+4+p);
- float dec_v0 = (int)(((64-w0)*ep_a + w0*ep_b + 32)/64);
- float dec_v1 = (int)(((64-w1)*ep_a + w1*ep_b + 32)/64);
- err0 += sq(dec_v0 - block[k+p*16]);
- err1 += sq(dec_v1 - block[k+p*16]);
- }
-
- int best_err = err1;
- int best_q = q1;
- if (err0<err1)
- {
- best_err = err0;
- best_q = q1-1;
- }
- assert(best_q>=0 && best_q<=levels-1);
- qblock[k/8] += ((uint32)best_q) << 4*(k%8);
- total_err += best_err;
- }
- return total_err;
- }
- ///////////////////////////
- // LS endpoint refinement
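- // opt_endpoints: with the indices held fixed, solve the per-channel 2x2
- // least-squares system (normal equations in the weights x = levels-1-q, y = q)
- // for the two endpoints; falls back to the masked mean if the system is
- // near-singular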
- void opt_endpoints(float ep[], float block[64], uniform int bits, uint32 qblock[2], int mask, uniform int channels)
- {
- uniform int levels = 1 << bits;
-
- float Atb1[4] = {0,0,0,0};
- float sum_q = 0;
- float sum_qq = 0;
- float sum[5] = {0,0,0,0,0};
-
- int mask_shifted = mask<<1;
- for (uniform int k1=0; k1<2; k1++)
- {
- uint32 qbits_shifted = qblock[k1];
- for (uniform int k2=0; k2<8; k2++)
- {
- uniform int k = k1*8+k2;
- float q = (int)(qbits_shifted&15);
- qbits_shifted >>= 4;
- mask_shifted >>= 1;
- if ((mask_shifted&1) == 0) continue;
-
- int x = (levels-1)-q;
- int y = q;
-
- sum_q += q;
- sum_qq += q*q;
- sum[4] += 1;
- for (uniform int p=0; p<channels; p++) sum[p] += block[k+p*16];
- for (uniform int p=0; p<channels; p++) Atb1[p] += x*block[k+p*16];
- }
- }
-
- float Atb2[4];
- for (uniform int p=0; p<channels; p++)
- {
- //sum[p] = dc[p]*16;
- Atb2[p] = (levels-1)*sum[p]-Atb1[p];
- }
-
- float Cxx = sum[4]*sq(levels-1)-2*(levels-1)*sum_q+sum_qq;
- float Cyy = sum_qq;
- float Cxy = (levels-1)*sum_q-sum_qq;
- float scale = (levels-1) / (Cxx*Cyy - Cxy*Cxy);
- for (uniform int p=0; p<channels; p++)
- {
- ep[0+p] = (Atb1[p]*Cyy - Atb2[p]*Cxy)*scale;
- ep[4+p] = (Atb2[p]*Cxx - Atb1[p]*Cxy)*scale;
-
- //ep[0+p] = clamp(ep[0+p], 0, 255);
- //ep[4+p] = clamp(ep[4+p], 0, 255);
- }
- if (abs(Cxx*Cyy - Cxy*Cxy) < 0.001f)
- {
- // flatten
- for (uniform int p=0; p<channels; p++)
- {
- ep[0+p] = sum[p]/sum[4];
- ep[4+p] = ep[0+p];
- }
- }
- }
- //////////////////////////
- // parameter estimation
- float compute_opaque_err(float block[64], uniform int channels)
- {
- if (channels == 3) return 0;
- float err = 0.0f;
- for (uniform int k=0; k<16; k++)
- {
- err += sq(block[48+k]-255);
- }
- return err;
- }
- float bc7_enc_mode01237_part_fast(int qep[24], uint32 qblock[2], float block[64], int part_id, uniform int mode)
- {
- uint32 pattern = get_pattern(part_id);
- uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3;
- uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;
- uniform int channels = 3; if (mode == 7) channels = 4;
- float ep[24];
- for (uniform int j=0; j<pairs; j++)
- {
- int mask = get_pattern_mask(part_id, j);
- block_segment(&ep[j*8], block, mask, channels);
- }
- ep_quant_dequant(qep, ep, mode, channels);
- float total_err = block_quant(qblock, block, bits, ep, pattern, channels);
- return total_err;
- }
- void bc7_enc_mode01237(bc7_enc_state state[], uniform int mode, int part_list[], uniform int part_count)
- {
- if (part_count == 0) return;
- uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3;
- uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;
- uniform int channels = 3; if (mode == 7) channels = 4;
- int best_qep[24];
- uint32 best_qblock[2];
- int best_part_id = -1;
- float best_err = FLT_MAX;
- for (uniform int part=0; part<part_count; part++)
- {
- int part_id = part_list[part]&63;
- if (pairs == 3) part_id += 64;
- int qep[24];
- uint32 qblock[2];
- float err = bc7_enc_mode01237_part_fast(qep, qblock, state->block, part_id, mode);
-
- if (err<best_err)
- {
- for (uniform int i=0; i<8*pairs; i++) best_qep[i] = qep[i];
- for (uniform int k=0; k<2; k++) best_qblock[k] = qblock[k];
- best_part_id = part_id;
- best_err = err;
- }
- }
-
- // refine
- uniform int refineIterations = state->refineIterations[mode];
- for (uniform int _=0; _<refineIterations; _++)
- {
- float ep[24];
- for (uniform int j=0; j<pairs; j++)
- {
- int mask = get_pattern_mask(best_part_id, j);
- opt_endpoints(&ep[j*8], state->block, bits, best_qblock, mask, channels);
- }
- int qep[24];
- uint32 qblock[2];
- ep_quant_dequant(qep, ep, mode, channels);
-
- uint32 pattern = get_pattern(best_part_id);
- float err = block_quant(qblock, state->block, bits, ep, pattern, channels);
- if (err<best_err)
- {
- for (uniform int i=0; i<8*pairs; i++) best_qep[i] = qep[i];
- for (uniform int k=0; k<2; k++) best_qblock[k] = qblock[k];
- best_err = err;
- }
- }
-
- if (mode != 7) best_err += state->opaque_err; // take into account alpha channel
- if (best_err<state->best_err)
- {
- state->best_err = best_err;
- bc7_code_mode01237(state->best_data, best_qep, best_qblock, best_part_id, mode);
- }
- }
- void partial_sort_list(int list[], uniform int length, uniform int partial_count)
- {
- for (uniform int k=0; k<partial_count; k++)
- {
- int best_idx = k;
- int best_value = list[k];
- for (uniform int i=k+1; i<length; i++)
- {
- if (best_value > list[i])
- {
- best_value = list[i];
- best_idx = i;
- }
- }
- // swap
- scatter_int(list, best_idx, list[k]);
- list[k] = best_value;
- }
- }
- void bc7_enc_mode02(bc7_enc_state state[])
- {
- int part_list[64];
- for (uniform int part=0; part<64; part++)
- part_list[part] = part;
- bc7_enc_mode01237(state, 0, part_list, 16);
- if (!state->skip_mode2) bc7_enc_mode01237(state, 2, part_list, 64); // usually not worth the time
- }
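- // modes 1/3 (and mode 7 below): instead of trying all 64 partitions, rank them
- // by a cheap PCA-based bound on the two-subset error and only run the full
- // encoder on the best fastSkipTreshold_* candidates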
- void bc7_enc_mode13(bc7_enc_state state[])
- {
- if (state->fastSkipTreshold_mode1 == 0 && state->fastSkipTreshold_mode3 == 0) return;
- float full_stats[15];
- compute_stats_masked(full_stats, state->block, -1, 3);
- int part_list[64];
- for (uniform int part=0; part<64; part++)
- {
- int mask = get_pattern_mask(part+0, 0);
- float bound12 = block_pca_bound_split(state->block, mask, full_stats, 3);
- int bound = (int)(bound12);
- part_list[part] = part+bound*64;
- }
- partial_sort_list(part_list, 64, max(state->fastSkipTreshold_mode1, state->fastSkipTreshold_mode3));
- bc7_enc_mode01237(state, 1, part_list, state->fastSkipTreshold_mode1);
- bc7_enc_mode01237(state, 3, part_list, state->fastSkipTreshold_mode3);
- }
- void bc7_enc_mode7(bc7_enc_state state[])
- {
- if (state->fastSkipTreshold_mode7 == 0) return;
- float full_stats[15];
- compute_stats_masked(full_stats, state->block, -1, state->channels);
- int part_list[64];
- for (uniform int part=0; part<64; part++)
- {
- int mask = get_pattern_mask(part+0, 0);
- float bound12 = block_pca_bound_split(state->block, mask, full_stats, state->channels);
- int bound = (int)(bound12);
- part_list[part] = part+bound*64;
- }
- partial_sort_list(part_list, 64, state->fastSkipTreshold_mode7);
- bc7_enc_mode01237(state, 7, part_list, state->fastSkipTreshold_mode7);
- }
- void channel_quant_dequant(int qep[2], float ep[2], uniform int epbits)
- {
- int elevels = (1<<epbits);
- for (uniform int i=0; i<2; i++)
- {
- int v = ((int)(ep[i]/255.0f*(elevels-1)+0.5f));
- qep[i] = clamp(v, 0, elevels-1);
- ep[i] = unpack_to_byte(qep[i], epbits);
- }
- }
- void channel_opt_endpoints(float ep[2], float block[16], uniform int bits, uint32 qblock[2])
- {
- uniform int levels = 1 << bits;
- float Atb1 = 0;
- float sum_q = 0;
- float sum_qq = 0;
- float sum = 0;
-
- for (uniform int k1=0; k1<2; k1++)
- {
- uint32 qbits_shifted = qblock[k1];
- for (uniform int k2=0; k2<8; k2++)
- {
- uniform int k = k1*8+k2;
- float q = (int)(qbits_shifted&15);
- qbits_shifted >>= 4;
- int x = (levels-1)-q;
- int y = q;
-
- sum_q += q;
- sum_qq += q*q;
- sum += block[k];
- Atb1 += x*block[k];
- }
- }
-
- float Atb2 = (levels-1)*sum-Atb1;
-
- float Cxx = 16*sq(levels-1)-2*(levels-1)*sum_q+sum_qq;
- float Cyy = sum_qq;
- float Cxy = (levels-1)*sum_q-sum_qq;
- float scale = (levels-1) / (Cxx*Cyy - Cxy*Cxy);
- ep[0] = (Atb1*Cyy - Atb2*Cxy)*scale;
- ep[1] = (Atb2*Cxx - Atb1*Cxy)*scale;
-
- ep[0] = clamp(ep[0], 0, 255);
- ep[1] = clamp(ep[1], 0, 255);
- if (abs(Cxx*Cyy - Cxy*Cxy) < 0.001f)
- {
- ep[0] = sum/16;
- ep[1] = ep[0];
- }
- }
- float channel_opt_quant(uint32 qblock[2], float block[16], uniform int bits, float ep[])
- {
- uniform const int* uniform unquant_table = get_unquant_table(bits);
- int levels = (1<<bits);
- qblock[0] = 0;
- qblock[1] = 0;
- float total_err = 0;
- for (uniform int k=0; k<16; k++)
- {
- float proj = (block[k]-ep[0])/(ep[1]-ep[0]+0.001f);
- int q1 = (int)(proj*levels+0.5f);
- q1 = clamp(q1, 1, levels-1);
- float err0 = 0;
- float err1 = 0;
- int w0 = gather_int(unquant_table, q1-1);
- int w1 = gather_int(unquant_table, q1);
-
- float dec_v0 = (int)(((64-w0)*ep[0] + w0*ep[1] + 32)/64);
- float dec_v1 = (int)(((64-w1)*ep[0] + w1*ep[1] + 32)/64);
- err0 += sq(dec_v0 - block[k]);
- err1 += sq(dec_v1 - block[k]);
- int best_err = err1;
- int best_q = q1;
- if (err0<err1)
- {
- best_err = err0;
- best_q = q1-1;
- }
- qblock[k/8] += ((uint32)best_q) << 4*(k%8);
- total_err += best_err;
- }
- return total_err;
- }
- float opt_channel(bc7_enc_state state[], uint32 qblock[2], int qep[2], float block[16], uniform int bits, uniform int epbits)
- {
- float ep[2] = {255,0};
- for (uniform int k=0; k<16; k++)
- {
- ep[0] = min(ep[0], block[k]);
- ep[1] = max(ep[1], block[k]);
- }
- channel_quant_dequant(qep, ep, epbits);
- float err = channel_opt_quant(qblock, block, bits, ep);
-
- // refine
- uniform const int refineIterations = state->refineIterations_channel;
- for (uniform int i=0; i<refineIterations; i++)
- {
- channel_opt_endpoints(ep, block, bits, qblock);
- channel_quant_dequant(qep, ep, epbits);
- err = channel_opt_quant(qblock, block, bits, ep);
- }
- return err;
- }
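- // modes 4/5 encode RGB as one vector plus one separately coded scalar channel;
- // "rotation" swaps the chosen channel with alpha before encoding (so any of
- // R/G/B/A can be the scalar channel), and "swap" selects mode 4's alternative
- // 3-bit color / 2-bit alpha index split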
- void bc7_enc_mode45_candidate(bc7_enc_state state[], mode45_parameters best_candidate[],
- float best_err[], uniform int mode, uniform int rotation, uniform int swap)
- {
- uniform int bits = 2;
- uniform int abits = 2; if (mode==4) abits = 3;
- uniform int aepbits = 8; if (mode==4) aepbits = 6;
- if (swap==1) { bits = 3; abits = 2; } // (mode 4)
- float block[48];
- for (uniform int k=0; k<16; k++)
- {
- for (uniform int p=0; p<3; p++)
- block[k+p*16] = state->block[k+p*16];
- if (rotation < 3)
- {
- // apply channel rotation
- if (state->channels == 4) block[k+rotation*16] = state->block[k+3*16];
- if (state->channels == 3) block[k+rotation*16] = 255;
- }
- }
-
- float ep[8];
- block_segment(ep, block, -1, 3);
- int qep[8];
- ep_quant_dequant(qep, ep, mode, 3);
- uint32 qblock[2];
- float err = block_quant(qblock, block, bits, ep, 0, 3);
-
- // refine
- uniform int refineIterations = state->refineIterations[mode];
- for (uniform int i=0; i<refineIterations; i++)
- {
- opt_endpoints(ep, block, bits, qblock, -1, 3);
- ep_quant_dequant(qep, ep, mode, 3);
- err = block_quant(qblock, block, bits, ep, 0, 3);
- }
- // encoding selected channel
- int aqep[2];
- uint32 aqblock[2];
- err += opt_channel(state, aqblock, aqep, &state->block[rotation*16], abits, aepbits);
- if (err<*best_err)
- {
-
- swap_ints(best_candidate->qep, qep, 8);
- swap_uints(best_candidate->qblock, qblock, 2);
- swap_ints(best_candidate->aqep, aqep, 2);
- swap_uints(best_candidate->aqblock, aqblock, 2);
- best_candidate->rotation = rotation;
- best_candidate->swap = swap;
- *best_err = err;
- }
- }
- void bc7_enc_mode45(bc7_enc_state state[])
- {
- mode45_parameters best_candidate;
- float best_err = state->best_err;
- memset(&best_candidate, 0, sizeof(mode45_parameters));
- uniform int channel0 = state->mode45_channel0;
- for (uniform int p=channel0; p<state->channels; p++)
- {
- bc7_enc_mode45_candidate(state, &best_candidate, &best_err, 4, p, 0);
- bc7_enc_mode45_candidate(state, &best_candidate, &best_err, 4, p, 1);
- }
- // mode 4
- if (best_err<state->best_err)
- {
- state->best_err = best_err;
- bc7_code_mode45(state->best_data, &best_candidate, 4);
- }
-
- for (uniform int p=channel0; p<state->channels; p++)
- {
- bc7_enc_mode45_candidate(state, &best_candidate, &best_err, 5, p, 0);
- }
- // mode 5
- if (best_err<state->best_err)
- {
- state->best_err = best_err;
- bc7_code_mode45(state->best_data, &best_candidate, 5);
- }
- }
- void bc7_enc_mode6(bc7_enc_state state[])
- {
- uniform int mode = 6;
- uniform int bits = 4;
- float ep[8];
- block_segment(ep, state->block, -1, state->channels);
-
- if (state->channels == 3)
- {
- ep[3] = ep[7] = 255;
- }
- int qep[8];
- ep_quant_dequant(qep, ep, mode, state->channels);
- uint32 qblock[2];
- float err = block_quant(qblock, state->block, bits, ep, 0, state->channels);
- // refine
- uniform int refineIterations = state->refineIterations[mode];
- for (uniform int i=0; i<refineIterations; i++)
- {
- opt_endpoints(ep, state->block, bits, qblock, -1, state->channels);
- ep_quant_dequant(qep, ep, mode, state->channels);
- err = block_quant(qblock, state->block, bits, ep, 0, state->channels);
- }
-
- if (err<state->best_err)
- {
- state->best_err = err;
- bc7_code_mode6(state->best_data, qep, qblock);
- }
- }
- //////////////////////////
- // BC7 bitstream coding
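- // BC7 stores one less bit for the anchor texel's index, so its MSB must be
- // zero; if texel 0's index falls in the upper half, swap the endpoint pair
- // and invert every index.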
- void bc7_code_apply_swap_mode456(int qep[], uniform int channels, uint32 qblock[2], uniform int bits)
- {
- uniform int levels = 1 << bits;
- if ((qblock[0]&15)>=levels/2)
- {
- swap_ints(&qep[0], &qep[channels], channels);
-
- for (uniform int k=0; k<2; k++)
- qblock[k] = (uint32)(0x11111111*(levels-1)) - qblock[k];
- }
- assert((qblock[0]&15) < levels/2);
- }
- int bc7_code_apply_swap_mode01237(int qep[], uint32 qblock[2], uniform int mode, int part_id)
- {
- uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3;
- uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;
- int flips = 0;
- uniform int levels = 1 << bits;
- int skips[3];
- get_skips(skips, part_id);
- for (uniform int j=0; j<pairs; j++)
- {
- int k0 = skips[j];
- //int q = (qblock[k0/8]>>((k0%8)*4))&15;
- int q = ((gather_uint(qblock, k0>>3)<<(28-(k0&7)*4))>>28);
-
- if (q>=levels/2)
- {
- swap_ints(&qep[8*j], &qep[8*j+4], 4);
- uint32 pmask = get_pattern_mask(part_id, j);
- flips |= pmask;
- }
- }
- return flips;
- }
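- // Appends the 'bits' low bits of v to the bitstream at position *pos,
- // spilling into the next 32-bit word when the value crosses a word boundary.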
- void put_bits(uint32 data[5], uniform int* uniform pos, uniform int bits, int v)
- {
- assert(v<pow2(bits));
- data[*pos/32] |= ((uint32)v) << (*pos%32);
- if (*pos%32+bits>32)
- {
- data[*pos/32+1] |= shift_right(v, 32-*pos%32);
- }
- *pos += bits;
- }
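- // Deletes the bit at position 'from'-1 of the 128-bit block and shifts
- // everything above it down by one; used by bc7_code_adjust_skip_mode01237 to
- // drop the implicit zero MSB of the extra anchor indices in multi-subset modes.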
- inline void data_shl_1bit_from(uint32 data[5], int from)
- {
- if (from < 96)
- {
- assert(from > 64+10);
- uint32 shifted = (data[2]>>1) | (data[3]<<31);
- uint32 mask = (pow2(from-64)-1)>>1;
- data[2] = (mask&data[2]) | (~mask&shifted);
- data[3] = (data[3]>>1) | (data[4]<<31);
- data[4] = data[4]>>1;
- }
- else if (from < 128)
- {
- uint32 shifted = (data[3]>>1) | (data[4]<<31);
- uint32 mask = (pow2(from-96)-1)>>1;
- data[3] = (mask&data[3]) | (~mask&shifted);
- data[4] = data[4]>>1;
- }
- }
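- // Writes the 16 per-texel indices; the very first texel (the fixed anchor)
- // gets one less bit, and indices of texels flagged in 'flips' are inverted to
- // match the endpoint swap of their subset.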
- void bc7_code_qblock(uint32 data[5], uniform int* uniform pPos, uint32 qblock[2], uniform int bits, int flips)
- {
- uniform int levels = 1 << bits;
- int flips_shifted = flips;
- for (uniform int k1=0; k1<2; k1++)
- {
- uint32 qbits_shifted = qblock[k1];
- for (uniform int k2=0; k2<8; k2++)
- {
- int q = qbits_shifted&15;
- if ((flips_shifted&1)>0) q = (levels-1)-q;
- if (k1==0 && k2==0) put_bits(data, pPos, bits-1, q);
- else put_bits(data, pPos, bits , q);
- qbits_shifted >>= 4;
- flips_shifted >>= 1;
- }
- }
- }
- void bc7_code_adjust_skip_mode01237(uint32 data[5], uniform int mode, int part_id)
- {
- uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3;
- uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;
-
- int skips[3];
- get_skips(skips, part_id);
- if (pairs>2 && skips[1] < skips[2])
- {
- int t = skips[1]; skips[1] = skips[2]; skips[2] = t;
- }
- for (uniform int j=1; j<pairs; j++)
- {
- int k = skips[j];
- data_shl_1bit_from(data, 128+(pairs-1)-(15-k)*bits);
- }
- }
- void bc7_code_mode01237(uint32 data[5], int qep[], uint32 qblock[2], int part_id, uniform int mode)
- {
- uniform int bits = 2; if (mode == 0 || mode == 1) bits = 3;
- uniform int pairs = 2; if (mode == 0 || mode == 2) pairs = 3;
- uniform int channels = 3; if (mode == 7) channels = 4;
- int flips = bc7_code_apply_swap_mode01237(qep, qblock, mode, part_id);
- for (uniform int k=0; k<5; k++) data[k] = 0;
- uniform int pos = 0;
- // mode 0-3, 7
- put_bits(data, &pos, mode+1, 1<<mode);
-
- // partition
- if (mode==0)
- {
- put_bits(data, &pos, 4, part_id&15);
- }
- else
- {
- put_bits(data, &pos, 6, part_id&63);
- }
-
- // endpoints
- for (uniform int p=0; p<channels; p++)
- for (uniform int j=0; j<pairs*2; j++)
- {
- if (mode == 0)
- {
- put_bits(data, &pos, 4, qep[j*4+0+p]>>1);
- }
- else if (mode == 1)
- {
- put_bits(data, &pos, 6, qep[j*4+0+p]>>1);
- }
- else if (mode == 2)
- {
- put_bits(data, &pos, 5, qep[j*4+0+p]);
- }
- else if (mode == 3)
- {
- put_bits(data, &pos, 7, qep[j*4+0+p]>>1);
- }
- else if (mode == 7)
- {
- put_bits(data, &pos, 5, qep[j*4+0+p]>>1);
- }
- else
- {
- assert(false);
- }
- }
-
- // p bits
- if (mode == 1)
- for (uniform int j=0; j<2; j++)
- {
- put_bits(data, &pos, 1, qep[j*8]&1);
- }
-
- if (mode == 0 || mode == 3 || mode == 7)
- for (uniform int j=0; j<pairs*2; j++)
- {
- put_bits(data, &pos, 1, qep[j*4]&1);
- }
-
- // quantized values
- bc7_code_qblock(data, &pos, qblock, bits, flips);
- bc7_code_adjust_skip_mode01237(data, mode, part_id);
- }
- void bc7_code_mode45(uint32 data[5], varying mode45_parameters* uniform params, uniform int mode)
- {
- int qep[8];
- uint32 qblock[2];
- int aqep[2];
- uint32 aqblock[2];
- swap_ints(params->qep, qep, 8);
- swap_uints(params->qblock, qblock, 2);
- swap_ints(params->aqep, aqep, 2);
- swap_uints(params->aqblock, aqblock, 2);
- int rotation = params->rotation;
- int swap = params->swap;
-
- uniform int bits = 2;
- uniform int abits = 2; if (mode==4) abits = 3;
- uniform int epbits = 7; if (mode==4) epbits = 5;
- uniform int aepbits = 8; if (mode==4) aepbits = 6;
- if (!swap)
- {
- bc7_code_apply_swap_mode456(qep, 4, qblock, bits);
- bc7_code_apply_swap_mode456(aqep, 1, aqblock, abits);
- }
- else
- {
- swap_uints(qblock, aqblock, 2);
- bc7_code_apply_swap_mode456(aqep, 1, qblock, bits);
- bc7_code_apply_swap_mode456(qep, 4, aqblock, abits);
- }
- for (uniform int k=0; k<5; k++) data[k] = 0;
- uniform int pos = 0;
-
- // mode 4-5
- put_bits(data, &pos, mode+1, 1<<mode);
-
- // rotation
- //put_bits(data, &pos, 2, (rotation+1)%4);
- put_bits(data, &pos, 2, (rotation+1)&3);
-
- if (mode==4)
- {
- put_bits(data, &pos, 1, swap);
- }
-
- // endpoints
- for (uniform int p=0; p<3; p++)
- {
- put_bits(data, &pos, epbits, qep[0+p]);
- put_bits(data, &pos, epbits, qep[4+p]);
- }
-
- // alpha endpoints
- put_bits(data, &pos, aepbits, aqep[0]);
- put_bits(data, &pos, aepbits, aqep[1]);
-
- // quantized values
- bc7_code_qblock(data, &pos, qblock, bits, 0);
- bc7_code_qblock(data, &pos, aqblock, abits, 0);
- }
- void bc7_code_mode6(uint32 data[5], int qep[8], uint32 qblock[2])
- {
- bc7_code_apply_swap_mode456(qep, 4, qblock, 4);
- for (uniform int k=0; k<5; k++) data[k] = 0;
- uniform int pos = 0;
- // mode 6
- put_bits(data, &pos, 7, 64);
-
- // endpoints
- for (uniform int p=0; p<4; p++)
- {
- put_bits(data, &pos, 7, qep[0+p]>>1);
- put_bits(data, &pos, 7, qep[4+p]>>1);
- }
-
- // p bits
- put_bits(data, &pos, 1, qep[0]&1);
- put_bits(data, &pos, 1, qep[4]&1);
-
- // quantized values
- bc7_code_qblock(data, &pos, qblock, 4, 0);
- }
- //////////////////////////
- // BC7 core
- inline void CompressBlockBC7_core(bc7_enc_state state[])
- {
- if (state->mode_selection[0]) bc7_enc_mode02(state);
- if (state->mode_selection[1]) bc7_enc_mode13(state);
- if (state->mode_selection[1]) bc7_enc_mode7(state);
- if (state->mode_selection[2]) bc7_enc_mode45(state);
- if (state->mode_selection[3]) bc7_enc_mode6(state);
- }
- void bc7_enc_copy_settings(bc7_enc_state state[], uniform bc7_enc_settings settings[])
- {
- state->channels = settings->channels;
-
- // mode02
- state->mode_selection[0] = settings->mode_selection[0];
- state->skip_mode2 = settings->skip_mode2;
- state->refineIterations[0] = settings->refineIterations[0];
- state->refineIterations[2] = settings->refineIterations[2];
- // mode137
- state->mode_selection[1] = settings->mode_selection[1];
- state->fastSkipTreshold_mode1 = settings->fastSkipTreshold_mode1;
- state->fastSkipTreshold_mode3 = settings->fastSkipTreshold_mode3;
- state->fastSkipTreshold_mode7 = settings->fastSkipTreshold_mode7;
- state->refineIterations[1] = settings->refineIterations[1];
- state->refineIterations[3] = settings->refineIterations[3];
- state->refineIterations[7] = settings->refineIterations[7];
- // mode45
- state->mode_selection[2] = settings->mode_selection[2];
-
- state->mode45_channel0 = settings->mode45_channel0;
- state->refineIterations_channel = settings->refineIterations_channel;
- state->refineIterations[4] = settings->refineIterations[4];
- state->refineIterations[5] = settings->refineIterations[5];
- // mode6
- state->mode_selection[3] = settings->mode_selection[3];
- state->refineIterations[6] = settings->refineIterations[6];
- }
- inline void CompressBlockBC7(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[],
- uniform bc7_enc_settings settings[])
- {
- bc7_enc_state _state;
- varying bc7_enc_state* uniform state = &_state;
- bc7_enc_copy_settings(state, settings);
- load_block_interleaved_rgba(state->block, src, xx, yy);
- state->best_err = FLT_MAX;
- state->opaque_err = compute_opaque_err(state->block, state->channels);
- CompressBlockBC7_core(state);
- store_data(dst, src->width, xx, yy, state->best_data, 4);
- }
- export void CompressBlocksBC7_ispc(uniform rgba_surface src[], uniform uint8 dst[], uniform bc7_enc_settings settings[])
- {
- const int h_4=src->height/4, w_4=src->width/4;
- for (uniform int yy = 0; yy<h_4; yy++)
- for (uniform int xx = 0; xx<w_4; xx++) // foreach (xx = 0 ... src->width/4)
- {
- CompressBlockBC7(src, xx, yy, dst, settings);
- }
- }
- ///////////////////////////////////////////////////////////
- // BC6H encoding
- /*struct bc6h_enc_settings
- {
- bool slow_mode;
- bool fast_mode;
- int refineIterations_1p;
- int refineIterations_2p;
- int fastSkipTreshold;
- };*/
- struct bc6h_enc_state
- {
- float block[64];
- float best_err;
- uint32 best_data[5]; // 4, +1 margin for skips
- float rgb_bounds[6];
- float max_span;
- int max_span_idx;
- int mode;
- int epb;
- int qbounds[8];
- // settings
- uniform bool slow_mode;
- uniform bool fast_mode;
- uniform int refineIterations_1p;
- uniform int refineIterations_2p;
- uniform int fastSkipTreshold;
- };
- void bc6h_code_2p(uint32 data[5], int pqep[], uint32 qblock[2], int part_id, int mode);
- void bc6h_code_1p(uint32 data[5], int qep[8], uint32 qblock[2], int mode);
- ///////////////////////////
- // BC6H format data
- static uniform const int mode_prefix_table[] =
- {
- 0, 1, 2, 6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15
- };
- inline uniform int get_mode_prefix(uniform int mode)
- {
- return mode_prefix_table[mode];
- }
- static uniform const float span_table[] =
- {
- 0.9 * 0xFFFF / 64, // (0) 4 / 10
- 0.9 * 0xFFFF / 4, // (1) 5 / 7
- 0.8 * 0xFFFF / 256, // (2) 3 / 11
- -1, -1,
- 0.9 * 0xFFFF / 32, // (5) 4 / 9
- 0.9 * 0xFFFF / 16, // (6) 4 / 8
- -1, -1,
- 0xFFFF, // (9) absolute
- 0xFFFF, // (10) absolute
- 0.95 * 0xFFFF / 8, // (11) 8 / 11
- 0.95 * 0xFFFF / 32, // (12) 7 / 12
- 6, // (13) 3 / 16
- };
- inline uniform float get_span(uniform int mode)
- {
- uniform float span = span_table[mode];
- assert(span > 0);
- return span;
- }
- static uniform const int mode_bits_table[] =
- {
- 10, 7, 11, -1, -1,
- 9, 8, -1, -1, 6,
- 10, 11, 12, 16,
- };
- inline uniform int get_mode_bits(uniform int mode)
- {
- uniform int mode_bits = mode_bits_table[mode];
- assert(mode_bits > 0);
- return mode_bits;
- }
- ///////////////////////////
- // endpoint quantization
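- // Expands a 'bits'-wide quantized endpoint back to the unsigned 16-bit range
- // (0 -> 0, max code -> 0xFFFF, otherwise (v*2+1) << (15-bits)).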
- inline int unpack_to_uf16(uint32 v, int bits)
- {
- if (bits >= 15) return v;
- if (v == 0) return 0;
- if (v == (1<<bits)-1) return 0xFFFF;
- return (v * 2 + 1) << (15-bits);
- }
- void ep_quant_bc6h(int qep[], float ep[], int bits, uniform int pairs)
- {
- int levels = 1 << bits;
- for (uniform int i = 0; i < 8 * pairs; i++)
- {
- int v = ((int)(ep[i] / (256 * 256.f - 1) * (levels - 1) + 0.5f));
- qep[i] = clamp(v, 0, levels - 1);
- }
- }
- void ep_dequant_bc6h(float ep[], int qep[], int bits, uniform int pairs)
- {
- for (uniform int i = 0; i < 8 * pairs; i++)
- ep[i] = unpack_to_uf16(qep[i], bits);
- }
- void ep_quant_dequant_bc6h(bc6h_enc_state state[], int qep[], float ep[], uniform int pairs)
- {
- int bits = state->epb;
- ep_quant_bc6h(qep, ep, bits, pairs);
- for (uniform int i = 0; i < 2 * pairs; i++)
- for (uniform int p = 0; p < 3; p++)
- {
- qep[i * 4 + p] = clamp(qep[i * 4 + p], state->qbounds[p], state->qbounds[4 + p]);
- }
- ep_dequant_bc6h(ep, qep, bits, pairs);
- }
- //////////////////////////
- // parameter estimation
- float bc6h_enc_2p_part_fast(bc6h_enc_state state[], int qep[16], uint32 qblock[2], int part_id)
- {
- uint32 pattern = get_pattern(part_id);
- uniform int bits = 3;
- uniform int pairs = 2;
- uniform int channels = 3;
- float ep[16];
- for (uniform int j = 0; j<pairs; j++)
- {
- int mask = get_pattern_mask(part_id, j);
- block_segment_core(&ep[j * 8], state->block, mask, channels);
- }
- ep_quant_dequant_bc6h(state, qep, ep, 2);
- float total_err = block_quant(qblock, state->block, bits, ep, pattern, channels);
- return total_err;
- }
- void bc6h_enc_2p_list(bc6h_enc_state state[], int part_list[], uniform int part_count)
- {
- if (part_count == 0) return;
- uniform int bits = 3;
- uniform int pairs = 2;
- uniform int channels = 3;
- int best_qep[24];
- uint32 best_qblock[2];
- int best_part_id = -1;
- float best_err = FLT_MAX;
- for (uniform int part = 0; part<part_count; part++)
- {
- int part_id = part_list[part] & 31;
- int qep[24];
- uint32 qblock[2];
- float err = bc6h_enc_2p_part_fast(state, qep, qblock, part_id);
- if (err<best_err)
- {
- for (uniform int i = 0; i<8 * pairs; i++) best_qep[i] = qep[i];
- for (uniform int k = 0; k<2; k++) best_qblock[k] = qblock[k];
- best_part_id = part_id;
- best_err = err;
- }
- }
- // refine
- uniform int refineIterations = state->refineIterations_2p;
- for (uniform int _ = 0; _<refineIterations; _++)
- {
- float ep[24];
- for (uniform int j = 0; j<pairs; j++)
- {
- int mask = get_pattern_mask(best_part_id, j);
- opt_endpoints(&ep[j * 8], state->block, bits, best_qblock, mask, channels);
- }
- int qep[24];
- uint32 qblock[2];
- ep_quant_dequant_bc6h(state, qep, ep, 2);
- uint32 pattern = get_pattern(best_part_id);
- float err = block_quant(qblock, state->block, bits, ep, pattern, channels);
- if (err<best_err)
- {
- for (uniform int i = 0; i<8 * pairs; i++) best_qep[i] = qep[i];
- for (uniform int k = 0; k<2; k++) best_qblock[k] = qblock[k];
- best_err = err;
- }
- }
- if (best_err<state->best_err)
- {
- state->best_err = best_err;
- bc6h_code_2p(state->best_data, best_qep, best_qblock, best_part_id, state->mode);
- }
- }
- void bc6h_enc_2p(bc6h_enc_state state[])
- {
- float full_stats[15];
- compute_stats_masked(full_stats, state->block, -1, 3);
- int part_list[32];
- for (uniform int part = 0; part < 32; part++)
- {
- int mask = get_pattern_mask(part, 0);
- float bound12 = block_pca_bound_split(state->block, mask, full_stats, 3);
- int bound = (int)(bound12);
- part_list[part] = part + bound * 64;
- }
-
- partial_sort_list(part_list, 32, state->fastSkipTreshold);
- bc6h_enc_2p_list(state, part_list, state->fastSkipTreshold);
- }
- void bc6h_enc_1p(bc6h_enc_state state[])
- {
- float ep[8];
- block_segment_core(ep, state->block, -1, 3);
- int qep[8];
- ep_quant_dequant_bc6h(state, qep, ep, 1);
- uint32 qblock[2];
- float err = block_quant(qblock, state->block, 4, ep, 0, 3);
- // refine
- uniform int refineIterations = state->refineIterations_1p;
- for (uniform int i = 0; i<refineIterations; i++)
- {
- opt_endpoints(ep, state->block, 4, qblock, -1, 3);
- ep_quant_dequant_bc6h(state, qep, ep, 1);
- err = block_quant(qblock, state->block, 4, ep, 0, 3);
- }
- if (err < state->best_err)
- {
- state->best_err = err;
- bc6h_code_1p(state->best_data, qep, qblock, state->mode);
- }
- }
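- // Builds quantized per-channel clamping bounds: a window of width rgb_span
- // centered on the block's color bounds, quantized at the current endpoint
- // precision, so ep_quant_dequant_bc6h keeps endpoints inside the range the
- // mode's deltas can express.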
- inline void compute_qbounds(bc6h_enc_state state[], float rgb_span[3])
- {
- float bounds[8];
- for (uniform int p = 0; p < 3; p++)
- {
- float middle = (state->rgb_bounds[p] + state->rgb_bounds[3 + p]) / 2;
- bounds[ p] = middle - rgb_span[p] / 2;
- bounds[4+p] = middle + rgb_span[p] / 2;
- }
- ep_quant_bc6h(state->qbounds, bounds, state->epb, 1);
- }
- void compute_qbounds(bc6h_enc_state state[], float span)
- {
- float rgb_span[3] = { span, span, span };
- compute_qbounds(state, rgb_span);
- }
- void compute_qbounds2(bc6h_enc_state state[], float span, int max_span_idx)
- {
- float rgb_span[3] = { span, span, span };
- for (uniform int p = 0; p < 3; p++)
- {
- rgb_span[p] *= (p == max_span_idx) ? 2 : 1;
- }
- compute_qbounds(state, rgb_span);
- }
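- // Tries one BC6H mode: skip it when the block's widest channel span (scaled
- // by 'margin') exceeds what the mode can encode; otherwise set the endpoint
- // precision, pick the mode variant from max_span_idx where applicable,
- // compute the clamping bounds, and optionally run the 1- or 2-partition encoder.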
- void bc6h_test_mode(bc6h_enc_state state[], uniform int mode, uniform bool enc, uniform float margin)
- {
- uniform int mode_bits = get_mode_bits(mode);
- uniform float span = get_span(mode);
- float max_span = state->max_span;
- int max_span_idx = state->max_span_idx;
- if (max_span * margin > span) return;
- if (mode >= 10)
- {
- state->epb = mode_bits;
- state->mode = mode;
- compute_qbounds(state, span);
- if (enc) bc6h_enc_1p(state);
- }
- else if (mode <= 1 || mode == 5 || mode == 9)
- {
- state->epb = mode_bits;
- state->mode = mode;
- compute_qbounds(state, span);
- if (enc) bc6h_enc_2p(state);
- }
- else
- {
- state->epb = mode_bits;
- state->mode = mode + max_span_idx;
-
- compute_qbounds2(state, span, max_span_idx);
- if (enc) bc6h_enc_2p(state);
- }
- }
- //////////////////////////
- // BC6H bitstream coding
- int bit_at(int v, uniform int pos)
- {
- return (v >> pos) & 1;
- }
- uint32 reverse_bits(uint32 v, uniform int bits)
- {
- if (bits == 2)
- {
- return (v >> 1) + (v & 1) * 2;
- }
- if (bits == 6)
- {
- v = (v & 0x5555) * 2 + ((v >> 1) & 0x5555);
- return (v >> 4) + ((v >> 2) & 3) * 4 + (v & 3) * 16;
- }
- return 0; // ESENTHEL CHANGED to silence warning
- }
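- // Packs the quantized endpoints into the mode-specific layout described by the
- // per-mode comments below: most modes store endpoint 0 absolutely and the
- // remaining endpoints as wrapped deltas, with the overflow bits scattered
- // across fixed positions.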
- void bc6h_pack(uint32 packed[], int qep[], int mode)
- {
- if (mode == 0)
- {
- int pred_qep[16];
- for (uniform int p = 0; p < 3; p++)
- {
- pred_qep[ p] = qep[p];
- pred_qep[ 4 + p] = (qep[ 4 + p] - qep[p]) & 31;
- pred_qep[ 8 + p] = (qep[ 8 + p] - qep[p]) & 31;
- pred_qep[12 + p] = (qep[12 + p] - qep[p]) & 31;
- }
- for (uniform int i = 1; i < 4; i++)
- for (uniform int p = 0; p < 3; p++)
- {
- assert( qep[i * 4 + p] - qep[p] <= 15);
- assert(-16 <= qep[i * 4 + p] - qep[p]);
- }
-
- /*
- g2[4], b2[4], b3[4],
- r0[9:0],
- g0[9:0],
- b0[9:0],
- r1[4:0], g3[4], g2[3:0],
- g1[4:0], b3[0], g3[3:0],
- b1[4:0], b3[1], b2[3:0],
- r2[4:0], b3[2],
- r3[4:0], b3[3]
- */
- uint32 pqep[10];
- pqep[4] = pred_qep[4] + (pred_qep[ 8 + 1] & 15) * 64;
- pqep[5] = pred_qep[5] + (pred_qep[12 + 1] & 15) * 64;
- pqep[6] = pred_qep[6] + (pred_qep[ 8 + 2] & 15) * 64;
- pqep[4] += bit_at(pred_qep[12 + 1], 4) << 5;
- pqep[5] += bit_at(pred_qep[12 + 2], 0) << 5;
- pqep[6] += bit_at(pred_qep[12 + 2], 1) << 5;
- pqep[8] = pred_qep[ 8] + bit_at(pred_qep[12 + 2], 2) * 32;
- pqep[9] = pred_qep[12] + bit_at(pred_qep[12 + 2], 3) * 32;
- packed[0] = get_mode_prefix(0);
- packed[0] += bit_at(pred_qep[ 8 + 1], 4) << 2;
- packed[0] += bit_at(pred_qep[ 8 + 2], 4) << 3;
- packed[0] += bit_at(pred_qep[12 + 2], 4) << 4;
- packed[1] = (pred_qep[2] << 20) + (pred_qep[1] << 10) + pred_qep[0];
- packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
- packed[3] = (pqep[9] << 6) + pqep[8];
- }
- else if (mode == 1)
- {
- int pred_qep[16];
- for (uniform int p = 0; p < 3; p++)
- {
- pred_qep[ p] = qep[p];
- pred_qep[ 4 + p] = (qep[ 4 + p] - qep[p]) & 63;
- pred_qep[ 8 + p] = (qep[ 8 + p] - qep[p]) & 63;
- pred_qep[12 + p] = (qep[12 + p] - qep[p]) & 63;
- }
- for (uniform int i = 1; i < 4; i++)
- for (uniform int p = 0; p < 3; p++)
- {
- assert( qep[i * 4 + p] - qep[p] <= 31);
- assert(-32 <= qep[i * 4 + p] - qep[p]);
- }
-
- /*
- g2[5], g3[4], g3[5],
- r0[6:0], b3[0], b3[1], b2[4],
- g0[6:0], b2[5], b3[2], g2[4],
- b0[6:0], b3[3], b3[5], b3[4],
- r1[5:0], g2[3:0],
- g1[5:0], g3[3:0],
- b1[5:0], b2[3:0],
- r2[5:0],
- r3[5:0]
- */
- uint32 pqep[8];
- pqep[0] = pred_qep[0];
- pqep[0] += bit_at(pred_qep[12 + 2], 0) << 7;
- pqep[0] += bit_at(pred_qep[12 + 2], 1) << 8;
- pqep[0] += bit_at(pred_qep[ 8 + 2], 4) << 9;
- pqep[1] = pred_qep[1];
- pqep[1] += bit_at(pred_qep[ 8 + 2], 5) << 7;
- pqep[1] += bit_at(pred_qep[12 + 2], 2) << 8;
- pqep[1] += bit_at(pred_qep[ 8 + 1], 4) << 9;
- pqep[2] = pred_qep[2];
- pqep[2] += bit_at(pred_qep[12 + 2], 3) << 7;
- pqep[2] += bit_at(pred_qep[12 + 2], 5) << 8;
- pqep[2] += bit_at(pred_qep[12 + 2], 4) << 9;
- pqep[4] = pred_qep[4] + (pred_qep[ 8 + 1] & 15) * 64;
- pqep[5] = pred_qep[5] + (pred_qep[12 + 1] & 15) * 64;
- pqep[6] = pred_qep[6] + (pred_qep[ 8 + 2] & 15) * 64;
- packed[0] = get_mode_prefix(1);
- packed[0] += bit_at(pred_qep[ 8 + 1], 5) << 2;
- packed[0] += bit_at(pred_qep[12 + 1], 4) << 3;
- packed[0] += bit_at(pred_qep[12 + 1], 5) << 4;
- packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
- packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
- packed[3] = (pred_qep[12] << 6) + pred_qep[8];
- }
- else if (mode == 2 || mode == 3 || mode == 4)
- {
- /*
- r0[9:0], g0[9:0], b0[9:0],
- r1[3:0], xx[y], xx[y], g2[3:0],
- g1[3:0], xx[y], xx[y], g3[3:0],
- b1[3:0], xx[y], xx[y], b2[3:0],
- r2[3:0], xx[y], xx[y],
- r3[3:0], xx[y], xx[y]
- */
- int dqep[16];
- for (uniform int p = 0; p < 3; p++)
- {
- int mask = 15;
- if (p == mode - 2) mask = 31;
- dqep[p] = qep[p];
- dqep[ 4 + p] = (qep[ 4 + p] - qep[p]) & mask;
- dqep[ 8 + p] = (qep[ 8 + p] - qep[p]) & mask;
- dqep[12 + p] = (qep[12 + p] - qep[p]) & mask;
- }
- for (uniform int i = 1; i < 4; i++)
- for (uniform int p = 0; p < 3; p++)
- {
- int bits = 4;
- if (p == mode - 2) bits = 5;
- assert( qep[i * 4 + p] - qep[p] <= (1<<bits)/2 - 1);
- assert(-(1<<bits)/2 <= qep[i * 4 + p] - qep[p]);
- }
-
- uint32 pqep[10];
- pqep[0] = dqep[0] & 1023;
- pqep[1] = dqep[1] & 1023;
- pqep[2] = dqep[2] & 1023;
- pqep[4] = dqep[4] + (dqep[ 8 + 1] & 15) * 64;
- pqep[5] = dqep[5] + (dqep[12 + 1] & 15) * 64;
- pqep[6] = dqep[6] + (dqep[ 8 + 2] & 15) * 64;
- pqep[8] = dqep[8];
- pqep[9] = dqep[12];
- if (mode == 2)
- {
- /*
- r0[9:0], g0[9:0], b0[9:0],
- r1[3:0], r1[4], r0[10], g2[3:0],
- g1[3:0], g0[10], b3[0], g3[3:0],
- b1[3:0], b0[10], b3[1], b2[3:0],
- r2[3:0], r2[4], b3[2],
- r3[3:0], r3[4], b3[3]
- */
- packed[0] = get_mode_prefix(2);
- //
- pqep[5] += bit_at(dqep[0 + 1], 10) << 4;
- pqep[6] += bit_at(dqep[0 + 2], 10) << 4;
- //
- //
- pqep[4] += bit_at(dqep[0 + 0], 10) << 5;
- pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
- pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
- pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
- pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
- }
- if (mode == 3)
- {
- /*
- r0[9:0], g0[9:0], b0[9:0],
- r1[3:0], r0[10], g3[4], g2[3:0],
- g1[3:0], g1[4], g0[10], g3[3:0],
- b1[3:0], b0[10], b3[1], b2[3:0],
- r2[3:0], b3[0], b3[2],
- r3[3:0], g2[4], b3[3]
- */
- packed[0] = get_mode_prefix(3);
- pqep[4] += bit_at(dqep[0 + 0], 10) << 4;
- //
- pqep[6] += bit_at(dqep[0 + 2], 10) << 4;
- pqep[8] += bit_at(dqep[12 + 2], 0) << 4;
- pqep[9] += bit_at(dqep[ 8 + 1], 4) << 4;
- pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
- pqep[5] += bit_at(dqep[0 + 1], 10) << 5;
- pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
- pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
- pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
- }
- if (mode == 4)
- {
- /*
- r0[9:0], g0[9:0], b0[9:0],
- r1[3:0], r0[10], b2[4], g2[3:0],
- g1[3:0], g0[10], b3[0], g3[3:0],
- b1[3:0], b1[4], b0[10], b2[3:0],
- r2[3:0], b3[1], b3[2],
- r3[3:0], b3[4], b3[3]
- */
- packed[0] = get_mode_prefix(4);
- pqep[4] += bit_at(dqep[0 + 0], 10) << 4;
- pqep[5] += bit_at(dqep[0 + 1], 10) << 4;
- //
- pqep[8] += bit_at(dqep[12 + 2], 1) << 4;
- pqep[9] += bit_at(dqep[12 + 2], 4) << 4;
- pqep[4] += bit_at(dqep[ 8 + 2], 4) << 5;
- pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
- pqep[6] += bit_at(dqep[0 + 2], 10) << 5;
- pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
- pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
- }
- packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
- packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
- packed[3] = (pqep[9] << 6) + pqep[8];
- }
- else if (mode == 5)
- {
- int dqep[16];
- for (uniform int p = 0; p < 3; p++)
- {
- dqep[p] = qep[p];
- dqep[ 4 + p] = (qep[ 4 + p] - qep[p]) & 31;
- dqep[ 8 + p] = (qep[ 8 + p] - qep[p]) & 31;
- dqep[12 + p] = (qep[12 + p] - qep[p]) & 31;
- }
- for (uniform int i = 1; i < 4; i++)
- for (uniform int p = 0; p < 3; p++)
- {
- assert( qep[i * 4 + p] - qep[p] <= 15);
- assert(-16 <= qep[i * 4 + p] - qep[p]);
- }
-
- /*
- r0[8:0], b2[4],
- g0[8:0], g2[4],
- b0[8:0], b3[4],
- r1[4:0], g3[4], g2[3:0],
- g1[4:0], b3[0], g3[3:0],
- b1[4:0], b3[1], b2[3:0],
- r2[4:0], b3[2],
- r3[4:0], b3[3]
- */
- uint32 pqep[10];
- pqep[0] = dqep[0];
- pqep[1] = dqep[1];
- pqep[2] = dqep[2];
- pqep[4] = dqep[4] + (dqep[ 8 + 1] & 15) * 64;
- pqep[5] = dqep[5] + (dqep[12 + 1] & 15) * 64;
- pqep[6] = dqep[6] + (dqep[ 8 + 2] & 15) * 64;
- pqep[8] = dqep[8];
- pqep[9] = dqep[12];
- pqep[0] += bit_at(dqep[ 8 + 2], 4) << 9;
- pqep[1] += bit_at(dqep[ 8 + 1], 4) << 9;
- pqep[2] += bit_at(dqep[12 + 2], 4) << 9;
-
- pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
- pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
- pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
- pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
- pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
- packed[0] = get_mode_prefix(5);
- packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
- packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
- packed[3] = (pqep[9] << 6) + pqep[8];
- }
- else if (mode == 6 || mode == 7 || mode == 8)
- {
- /*
- r0[7:0], xx[y], b2[4],
- g0[7:0], xx[y], g2[4],
- b0[7:0], xx[y], b3[4],
- r1[4:0], xx[y], g2[3:0],
- g1[4:0], xx[y], g3[3:0],
- b1[4:0], xx[y], b2[3:0],
- r2[4:0], xx[y],
- r3[4:0], xx[y]
- */
- int dqep[16];
- for (uniform int p = 0; p < 3; p++)
- {
- int mask = 31;
- if (p == mode - 6) mask = 63;
- dqep[p] = qep[p];
- dqep[ 4 + p] = (qep[ 4 + p] - qep[p]) & mask;
- dqep[ 8 + p] = (qep[ 8 + p] - qep[p]) & mask;
- dqep[12 + p] = (qep[12 + p] - qep[p]) & mask;
- }
- for (uniform int i = 1; i < 4; i++)
- for (uniform int p = 0; p < 3; p++)
- {
- int bits = 5;
- if (p == mode - 6) bits = 6;
- assert( qep[i * 4 + p] - qep[p] <= (1<<bits)/2 - 1);
- assert(-(1<<bits)/2 <= qep[i * 4 + p] - qep[p]);
- }
-
- uint32 pqep[10];
- pqep[0] = dqep[0];
- pqep[0] += bit_at(dqep[ 8 + 2], 4) << 9;
- pqep[1] = dqep[1];
- pqep[1] += bit_at(dqep[ 8 + 1], 4) << 9;
- pqep[2] = dqep[2];
- pqep[2] += bit_at(dqep[12 + 2], 4) << 9;
- pqep[4] = dqep[4] + (dqep[ 8 + 1] & 15) * 64;
- pqep[5] = dqep[5] + (dqep[12 + 1] & 15) * 64;
- pqep[6] = dqep[6] + (dqep[ 8 + 2] & 15) * 64;
- pqep[8] = dqep[8];
- pqep[9] = dqep[12];
- if (mode == 6)
- {
- /*
- r0[7:0], g3[4], b2[4],
- g0[7:0], b3[2], g2[4],
- b0[7:0], b3[3], b3[4],
- r1[4:0], r1[5], g2[3:0],
- g1[4:0], b3[0], g3[3:0],
- b1[4:0], b3[1], b2[3:0],
- r2[5:0],
- r3[5:0]
- */
- packed[0] = get_mode_prefix(6);
- pqep[0] += bit_at(dqep[12 + 1], 4) << 8;
- pqep[1] += bit_at(dqep[12 + 2], 2) << 8;
- pqep[2] += bit_at(dqep[12 + 2], 3) << 8;
- //
- pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
- pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
- //
- //
- }
- if (mode == 7)
- {
- /*
- r0[7:0], b3[0], b2[4],
- g0[7:0], g2[5], g2[4],
- b0[7:0], g3[5], b3[4],
- r1[4:0], g3[4], g2[3:0],
- g1[4:0], g1[5], g3[3:0],
- b1[4:0], b3[1], b2[3:0],
- r2[4:0], b3[2],
- r3[4:0], b3[3]
- */
- packed[0] = get_mode_prefix(7);
- pqep[0] += bit_at(dqep[12 + 2], 0) << 8;
- pqep[1] += bit_at(dqep[ 8 + 1], 5) << 8;
- pqep[2] += bit_at(dqep[12 + 1], 5) << 8;
- pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
- //
- pqep[6] += bit_at(dqep[12 + 2], 1) << 5;
- pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
- pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
- }
- if (mode == 8)
- {
- /*
- r0[7:0], b3[1], b2[4],
- g0[7:0], b2[5], g2[4],
- b0[7:0], b3[5], b3[4],
- r1[4:0], g3[4], g2[3:0],
- g1[4:0], b3[0], g3[3:0],
- b1[4:0], b1[5], b2[3:0],
- r2[4:0], b3[2],
- r3[4:0], b3[3]
- */
- packed[0] = get_mode_prefix(8);
- pqep[0] += bit_at(dqep[12 + 2], 1) << 8;
- pqep[1] += bit_at(dqep[ 8 + 2], 5) << 8;
- pqep[2] += bit_at(dqep[12 + 2], 5) << 8;
- pqep[4] += bit_at(dqep[12 + 1], 4) << 5;
- pqep[5] += bit_at(dqep[12 + 2], 0) << 5;
- //
- pqep[8] += bit_at(dqep[12 + 2], 2) << 5;
- pqep[9] += bit_at(dqep[12 + 2], 3) << 5;
- }
- packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
- packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
- packed[3] = (pqep[9] << 6) + pqep[8];
- }
- else if (mode == 9)
- {
- /*
- r0[5:0], g3[4], b3[0], b3[1], b2[4], // 10
- g0[5:0], g2[5], b2[5], b3[2], g2[4], // 10
- b0[5:0], g3[5], b3[3], b3[5], b3[4], // 10
- r1[5:0], g2[3:0], // 10
- g1[5:0], g3[3:0], // 10
- b1[5:0], b2[3:0], // 10
- r2[5:0], // 6
- r3[5:0] // 6
- */
- uint32 pqep[10];
- pqep[0] = qep[0];
- pqep[0] += bit_at(qep[12 + 1], 4) << 6;
- pqep[0] += bit_at(qep[12 + 2], 0) << 7;
- pqep[0] += bit_at(qep[12 + 2], 1) << 8;
- pqep[0] += bit_at(qep[ 8 + 2], 4) << 9;
- pqep[1] = qep[1];
- pqep[1] += bit_at(qep[ 8 + 1], 5) << 6;
- pqep[1] += bit_at(qep[ 8 + 2], 5) << 7;
- pqep[1] += bit_at(qep[12 + 2], 2) << 8;
- pqep[1] += bit_at(qep[ 8 + 1], 4) << 9;
- pqep[2] = qep[2];
- pqep[2] += bit_at(qep[12 + 1], 5) << 6;
- pqep[2] += bit_at(qep[12 + 2], 3) << 7;
- pqep[2] += bit_at(qep[12 + 2], 5) << 8;
- pqep[2] += bit_at(qep[12 + 2], 4) << 9;
- pqep[4] = qep[4] + (qep[ 8 + 1] & 15) * 64;
- pqep[5] = qep[5] + (qep[12 + 1] & 15) * 64;
- pqep[6] = qep[6] + (qep[ 8 + 2] & 15) * 64;
- packed[0] = get_mode_prefix(9);
- packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
- packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
- packed[3] = (qep[12] << 6) + qep[8];
- }
- else if (mode == 10)
- {
- // the only mode with no delta encoding and no extra bits to repack
- packed[0] = get_mode_prefix(10);
- packed[1] = (qep[2] << 20) + (qep[1] << 10) + qep[0];
- packed[2] = (qep[6] << 20) + (qep[5] << 10) + qep[4];
- }
- else if (mode == 11)
- {
- int dqep[8];
- for (uniform int p = 0; p < 3; p++)
- {
- dqep[p] = qep[p];
- dqep[4 + p] = (qep[4 + p] - qep[p]) & 511;
- }
-
- for (uniform int i = 1; i < 2; i++)
- for (uniform int p = 0; p < 3; p++)
- {
- assert( qep[i * 4 + p] - qep[p] <= 255);
- assert(-256 <= qep[i * 4 + p] - qep[p]);
- }
- /*
- r0[9:0], g0[9:0], b0[9:0],
- r1[8:0], r0[10],
- g1[8:0], g0[10],
- b1[8:0], b0[10]
- */
- uint32 pqep[8];
- pqep[0] = dqep[0] & 1023;
- pqep[1] = dqep[1] & 1023;
- pqep[2] = dqep[2] & 1023;
- pqep[4] = dqep[4] + (dqep[0] >> 10) * 512;
- pqep[5] = dqep[5] + (dqep[1] >> 10) * 512;
- pqep[6] = dqep[6] + (dqep[2] >> 10) * 512;
- packed[0] = get_mode_prefix(11);
- packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
- packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
- }
- else if (mode == 12)
- {
- int dqep[8];
- for (uniform int p = 0; p < 3; p++)
- {
- dqep[p] = qep[p];
- dqep[4 + p] = (qep[4 + p] - qep[p]) & 255;
- }
-
- for (uniform int i = 1; i < 2; i++)
- for (uniform int p = 0; p < 3; p++)
- {
- assert( qep[i * 4 + p] - qep[p] <= 127);
- assert(-128 <= qep[i * 4 + p] - qep[p]);
- }
- /*
- r0[9:0], g0[9:0], b0[9:0],
- r1[7:0], r0[10:11],
- g1[7:0], g0[10:11],
- b1[7:0], b0[10:11]
- */
- uint32 pqep[8];
- pqep[0] = dqep[0] & 1023;
- pqep[1] = dqep[1] & 1023;
- pqep[2] = dqep[2] & 1023;
- pqep[4] = dqep[4] + reverse_bits(dqep[0] >> 10, 2) * 256;
- pqep[5] = dqep[5] + reverse_bits(dqep[1] >> 10, 2) * 256;
- pqep[6] = dqep[6] + reverse_bits(dqep[2] >> 10, 2) * 256;
- packed[0] = get_mode_prefix(12);
- packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
- packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
- }
- else if (mode == 13)
- {
- int dqep[8];
- for (uniform int p = 0; p < 3; p++)
- {
- dqep[p] = qep[p];
- dqep[4 + p] = (qep[4 + p] - qep[p]) & 15;
- }
-
- for (uniform int i = 1; i < 2; i++)
- for (uniform int p = 0; p < 3; p++)
- {
- assert( qep[i * 4 + p] - qep[p] <= 7);
- assert(-8 <= qep[i * 4 + p] - qep[p]);
- }
- /*
- r0[9:0], g0[9:0], b0[9:0],
- r1[3:0], r0[10:15],
- g1[3:0], g0[10:15],
- b1[3:0], b0[10:15]
- */
- uint32 pqep[8];
- pqep[0] = dqep[0] & 1023;
- pqep[1] = dqep[1] & 1023;
- pqep[2] = dqep[2] & 1023;
- pqep[4] = dqep[4] + reverse_bits(dqep[0] >> 10, 6) * 16;
- pqep[5] = dqep[5] + reverse_bits(dqep[1] >> 10, 6) * 16;
- pqep[6] = dqep[6] + reverse_bits(dqep[2] >> 10, 6) * 16;
- packed[0] = get_mode_prefix(13);
- packed[1] = (pqep[2] << 20) + (pqep[1] << 10) + pqep[0];
- packed[2] = (pqep[6] << 20) + (pqep[5] << 10) + pqep[4];
- }
- else
- {
- assert(false);
- }
- }
- void bc6h_code_2p(uint32 data[5], int qep[], uint32 qblock[2], int part_id, int mode)
- {
- uniform int bits = 3;
- uniform int pairs = 2;
- uniform int channels = 3;
- int flips = bc7_code_apply_swap_mode01237(qep, qblock, 1, part_id);
- for (uniform int k=0; k<5; k++) data[k] = 0;
- uniform int pos = 0;
- uint32 packed[4];
- bc6h_pack(packed, qep, mode);
- // mode
- put_bits(data, &pos, 5, packed[0]);
- // endpoints
- put_bits(data, &pos, 30, packed[1]);
- put_bits(data, &pos, 30, packed[2]);
- put_bits(data, &pos, 12, packed[3]);
-
- // partition
- put_bits(data, &pos, 5, part_id);
- // quantized values
- bc7_code_qblock(data, &pos, qblock, bits, flips);
- bc7_code_adjust_skip_mode01237(data, 1, part_id);
- }
- void bc6h_code_1p(uint32 data[5], int qep[8], uint32 qblock[2], int mode)
- {
- bc7_code_apply_swap_mode456(qep, 4, qblock, 4);
- for (uniform int k = 0; k<5; k++) data[k] = 0;
- uniform int pos = 0;
- uint32 packed[4];
- bc6h_pack(packed, qep, mode);
- // mode
- put_bits(data, &pos, 5, packed[0]);
- // endpoints
- put_bits(data, &pos, 30, packed[1]);
- put_bits(data, &pos, 30, packed[2]);
-
- // quantized values
- bc7_code_qblock(data, &pos, qblock, 4, 0);
- }
- //////////////////////////
- // BC6H core
- void bc6h_setup(bc6h_enc_state state[])
- {
- for (uniform int p = 0; p < 3; p++)
- {
- state->rgb_bounds[p ] = 0xFFFF;
- state->rgb_bounds[3+p] = 0;
- }
- // uf16 conversion, min/max
- for (uniform int p = 0; p < 3; p++)
- for (uniform int k = 0; k < 16; k++)
- {
- state->block[p * 16 + k] = (state->block[p * 16 + k] / 31) * 64;
- state->rgb_bounds[p ] = min(state->rgb_bounds[p ], state->block[p * 16 + k]);
- state->rgb_bounds[3+p] = max(state->rgb_bounds[3+p], state->block[p * 16 + k]);
- }
- state->max_span = 0;
- state->max_span_idx = 0;
- float rgb_span[] = { 0, 0, 0 };
- for (uniform int p = 0; p < 3; p++)
- {
- rgb_span[p] = state->rgb_bounds[3+p] - state->rgb_bounds[p];
- if (rgb_span[p] > state->max_span)
- {
- state->max_span_idx = p;
- state->max_span = rgb_span[p];
- }
- }
- }
- inline void CompressBlockBC6H_core(bc6h_enc_state state[])
- {
- bc6h_setup(state);
- if (state->slow_mode)
- {
- bc6h_test_mode(state, 0, true, 0);
- bc6h_test_mode(state, 1, true, 0);
- bc6h_test_mode(state, 2, true, 0);
- bc6h_test_mode(state, 5, true, 0);
- bc6h_test_mode(state, 6, true, 0);
- bc6h_test_mode(state, 9, true, 0);
- bc6h_test_mode(state, 10, true, 0);
- bc6h_test_mode(state, 11, true, 0);
- bc6h_test_mode(state, 12, true, 0);
- bc6h_test_mode(state, 13, true, 0);
- }
- else
- {
- if (state->fastSkipTreshold > 0)
- {
- bc6h_test_mode(state, 9, false, 0);
- if (state->fast_mode) bc6h_test_mode(state, 1, false, 1);
- bc6h_test_mode(state, 6, false, 1 / 1.2);
- bc6h_test_mode(state, 5, false, 1 / 1.2);
- bc6h_test_mode(state, 0, false, 1 / 1.2);
- bc6h_test_mode(state, 2, false, 1);
- bc6h_enc_2p(state);
- if (!state->fast_mode) bc6h_test_mode(state, 1, true, 0);
- }
- bc6h_test_mode(state, 10, false, 0);
- bc6h_test_mode(state, 11, false, 1);
- bc6h_test_mode(state, 12, false, 1);
- bc6h_test_mode(state, 13, false, 1);
- bc6h_enc_1p(state);
- }
- }
- void bc6h_enc_copy_settings(bc6h_enc_state state[], uniform bc6h_enc_settings settings[])
- {
- state->slow_mode = settings->slow_mode;
- state->fast_mode = settings->fast_mode;
- state->fastSkipTreshold = settings->fastSkipTreshold;
- state->refineIterations_1p = settings->refineIterations_1p;
- state->refineIterations_2p = settings->refineIterations_2p;
- }
- inline void CompressBlockBC6H(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[], uniform bc6h_enc_settings settings[])
- {
- bc6h_enc_state _state;
- varying bc6h_enc_state* uniform state = &_state;
- bc6h_enc_copy_settings(state, settings);
- load_block_interleaved_16bit(state->block, src, xx, yy);
- state->best_err = FLT_MAX;
- CompressBlockBC6H_core(state);
- store_data(dst, src->width, xx, yy, state->best_data, 4);
- }
- export void CompressBlocksBC6H_ispc(uniform rgba_surface src[], uniform uint8 dst[], uniform bc6h_enc_settings settings[])
- {
- const int h_4=src->height/4, w_4=src->width/4;
- for (uniform int yy = 0; yy<h_4; yy++)
- for (uniform int xx = 0; xx<w_4; xx++) // foreach (xx = 0 ... src->width/4)
- {
- CompressBlockBC6H(src, xx, yy, dst, settings);
- }
- }
- ///////////////////////////////////////////////////////////
- // ETC encoding
- /*struct etc_enc_settings
- {
- int fastSkipTreshold;
- };*/
- struct etc_enc_state
- {
- float block[64];
- int prev_qcenter[3];
- float best_err;
- uint32 best_data[2];
- uniform bool diff;
- // settings
- uniform int fastSkipTreshold;
- };
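- // ETC1 intensity modifier table: for each of the 8 table indices, the four
- // luminance offsets applied to the base color.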
- static uniform const int etc_codeword_table[8][4] =
- {
- { -8, -2, 2, 8 },
- { -17, -5, 5, 17 },
- { -29, -9, 9, 29 },
- { -42, -13, 13, 42 },
- { -60, -18, 18, 60 },
- { -80, -24, 24, 80 },
- { -106, -33, 33, 106 },
- { -183, -47, 47, 183 },
- };
- inline uniform int get_etc1_dY(uniform int table, uniform int q)
- {
- return etc_codeword_table[table][q];
- }
- static uniform const int remap_q[] = { 2, 3, 1, 0 };
- int get_remap2_q(int x)
- {
- x -= 2;
- if (x < 0) x = 1 - x;
- return x;
- }
- int extend_4to8bits(int value)
- {
- return (value << 4) | value;
- }
- int extend_5to8bits(int value)
- {
- return (value << 3) | (value >> 2);
- }
- int quantize_4bits(float value)
- {
- return clamp((value / 255.0f) * 15 + 0.5f, 0, 15);
- }
- int quantize_5bits(float value)
- {
- return clamp((value / 255.0f) * 31 + 0.5f, 0, 31);
- }
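- // Quantizes the half-block base color to 5:5:5 (differential mode) or 4:4:4
- // (individual mode); in differential mode the color is clamped so it stays
- // within a signed 3-bit delta of the first half's base.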
- void center_quant_dequant(int qcenter[3], float center[3], uniform bool diff, int prev_qcenter[3])
- {
- if (diff)
- {
- for (uniform int p = 0; p < 3; p++)
- {
- qcenter[p] = quantize_5bits(center[p]);
- if (prev_qcenter[0] >= 0)
- {
- if (qcenter[p] - prev_qcenter[p] > 3) qcenter[p] = prev_qcenter[p] + 3;
- if (qcenter[p] - prev_qcenter[p] < -4) qcenter[p] = prev_qcenter[p] - 4;
- }
- center[p] = extend_5to8bits(qcenter[p]);
- }
- }
- else
- {
- for (uniform int p = 0; p < 3; p++)
- {
- qcenter[p] = quantize_4bits(center[p]);
- center[p] = extend_4to8bits(qcenter[p]);
- }
- }
- }
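- // For each texel of the 2x4 half-block, picks the modifier index with the
- // smallest squared RGB error against clamp(base + dY) and packs the index
- // LSBs and MSBs into the two 16-bit ETC1 index planes.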
- float quantize_pixels_etc1_half(uint32 qblock[1], float block[48], float center[3], uniform int table)
- {
- float total_err = 0;
- uint32 bits = 0;
- for (uniform int y = 0; y < 2; y++)
- for (uniform int x = 0; x < 4; x++)
- {
- float best_err = sq(255) * 3;
- int best_q = -1;
- for (uniform int q = 0; q < 4; q++)
- {
- int dY = get_etc1_dY(table, remap_q[q]);
- float err = 0;
- for (uniform int p = 0; p < 3; p++)
- err += sq(block[16 * p + y*4+x] - clamp(center[p] + dY, 0, 255));
- if (err < best_err)
- {
- best_err = err;
- best_q = q;
- }
- }
- assert(best_q >= 0);
- bits |= (best_q & 1) << (x * 4 + y);
- bits |= (best_q >> 1) << (x * 4 + y + 16);
- total_err += best_err;
- }
- qblock[0] = bits;
- return total_err;
- }
- float compress_etc1_half_1(uint32 out_qbits[1], int out_table[1], int out_qcenter[3],
- float half_pixels[], uniform bool diff, int prev_qcenter[3])
- {
- float dc[3];
- for (uniform int p = 0; p<3; p++) dc[p] = 0;
- for (uniform int k = 0; k<8; k++)
- {
- for (uniform int p = 0; p<3; p++)
- dc[p] += half_pixels[k + p * 16];
- }
- float best_error = sq(255) * 3 * 8.0f;
- int best_table = -1;
- int best_qcenter[3];
- uint32 best_qbits;
- for (uniform int table_level = 0; table_level < 8; table_level++)
- {
- float center[3];
- int qcenter[3];
- uint32 qbits;
- for (uniform int p = 0; p < 3; p++) center[p] = dc[p] / 8 - get_etc1_dY(table_level, 2);
- center_quant_dequant(qcenter, center, diff, prev_qcenter);
- float err = quantize_pixels_etc1_half(&qbits, half_pixels, center, table_level);
- if (err < best_error)
- {
- best_error = err;
- best_table = table_level;
- best_qbits = qbits;
- for (uniform int p = 0; p < 3; p++) best_qcenter[p] = qcenter[p];
- }
- }
-
- out_table[0] = best_table;
- out_qbits[0] = best_qbits;
- for (uniform int p = 0; p < 3; p++) out_qcenter[p] = best_qcenter[p];
- return best_error;
- }
- float optimize_center(float colors[4][10], uniform int p, uniform int table_level)
- {
- float best_center = 0;
- for (uniform int q = 0; q < 4; q++)
- {
- best_center += (colors[q][7 + p] - get_etc1_dY(table_level, q)) * colors[q][3];
- }
- best_center /= 8;
- float best_err = 0;
- for (uniform int q = 0; q < 4; q++)
- {
- float dY = get_etc1_dY(table_level, q);
- best_err += sq(clamp(best_center + dY, 0, 255) - colors[q][7 + p]) * colors[q][3];
- }
- for (uniform int branch = 0; branch < 4; branch++)
- {
- float new_center = 0;
- float sum = 0;
- for (uniform int q = 0; q < 4; q++)
- {
- if (branch <= 1 && q <= branch) continue;
- if (branch >= 2 && q >= branch) continue;
- new_center += (colors[q][7 + p] - get_etc1_dY(table_level, q)) * colors[q][3];
- sum += colors[q][3];
- }
- new_center /= sum;
- float err = 0;
- for (uniform int q = 0; q < 4; q++)
- {
- float dY = get_etc1_dY(table_level, q);
- err += sq(clamp(new_center + dY, 0, 255) - colors[q][7 + p]) * colors[q][3];
- }
- if (err < best_err)
- {
- best_err = err;
- best_center = new_center;
- }
- }
- return best_center;
- }
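- // Exhaustive half-block search: sorts the 8 texels by luminance, enumerates
- // all 165 monotone splits into four index groups, scores each split with a
- // luminance-only error bound, keeps the best fastSkipTreshold candidates, and
- // refines each with per-channel center optimization over all 8 modifier tables.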
- float compress_etc1_half_7(uint32 out_qbits[1], int out_table[1], int out_qcenter[3],
- float half_pixels[], etc_enc_state state[])
- {
- int err_list[165];
- int y_sorted_inv[8];
- float y_sorted[8];
- {
- int y_sorted_idx[8];
- for (uniform int k = 0; k < 8; k++)
- {
- float value = 0;
- for (uniform int p = 0; p < 3; p++)
- value += half_pixels[k + p * 16];
- y_sorted_idx[k] = (((int)value) << 4) + k;
- }
- partial_sort_list(y_sorted_idx, 8, 8);
- for (uniform int k = 0; k < 8; k++)
- y_sorted_inv[k] = ((y_sorted_idx[k] & 0xF) << 4) + k;
- for (uniform int k = 0; k < 8; k++)
- y_sorted[k] = (y_sorted_idx[k] >> 4) / 3.0f;
- partial_sort_list(y_sorted_inv, 8, 8);
- }
- uniform int idx = -1;
- for (uniform int level1 = 0; level1 <= 8; level1++)
- for (uniform int level2 = level1; level2 <= 8; level2++)
- for (uniform int level3 = level2; level3 <= 8; level3++)
- {
- idx++;
- assert(idx < 165);
-
- float sum[4];
- float sum_sq[4];
- float count[4];
- float inv_count[4];
- for (uniform int q = 0; q < 4; q++)
- {
- sum[q] = 0;
- sum_sq[q] = 0;
- count[q] = 0;
- inv_count[q] = 0;
- }
- for (uniform int k = 0; k < 8; k++)
- {
- uniform int q = 0;
- if (k >= level1) q = 1;
- if (k >= level2) q = 2;
- if (k >= level3) q = 3;
- sum[q] += y_sorted[k];
- sum_sq[q] += sq(y_sorted[k]);
- count[q] += 1;
- }
- for (uniform int q = 0; q < 4; q++)
- {
- if (count[q] > 0) inv_count[q] = 1 / count[q];
- }
- float base_err = 0;
- for (uniform int q = 0; q < 4; q++) base_err += sum_sq[q] - sq(sum[q]) * inv_count[q];
- float t_err = sq(256) * 8;
- for (uniform int table_level = 0; table_level < 8; table_level++)
- {
- float center = 0;
- for (uniform int q = 0; q < 4; q++) center += sum[q] - get_etc1_dY(table_level, q) * count[q];
- center /= 8;
- float err = base_err;
- for (uniform int q = 0; q < 4; q++)
- {
- err += sq(center + get_etc1_dY(table_level, q) - sum[q] * inv_count[q])*count[q];
- }
- t_err = min(t_err, err);
- }
- int packed = (level1 * 16 + level2) * 16 + level3;
- err_list[idx] = (((int)t_err) << 12) + packed;
- }
- partial_sort_list(err_list, 165, state->fastSkipTreshold);
- float best_error = sq(255) * 3 * 8.0f;
- int best_table = -1;
- int best_qcenter[3];
- uint32 best_qbits;
- for (uniform int i = 0; i < state->fastSkipTreshold; i++)
- {
- int packed = err_list[i] & 0xFFF;
- int level1 = (packed >> 8) & 0xF;
- int level2 = (packed >> 4) & 0xF;
- int level3 = (packed >> 0) & 0xF;
-
- float colors[4][10];
- for (uniform int p = 0; p < 7; p++)
- for (uniform int q = 0; q < 4; q++) colors[q][p] = 0;
- uint32 qbits = 0;
- for (uniform int kk = 0; kk < 8; kk++)
- {
- int k = y_sorted_inv[kk] & 0xF;
- int qq = 0;
- if (k >= level1) qq = 1;
- if (k >= level2) qq = 2;
- if (k >= level3) qq = 3;
- uniform int xx = kk & 3;
- uniform int yy = kk >> 2;
- int qqq = get_remap2_q(qq);
- qbits |= (qqq & 1) << (yy + xx * 4);
- qbits |= (qqq >> 1) << (16 + yy + xx * 4);
- float qvec[4];
- for (uniform int q = 0; q < 4; q++)
- {
- qvec[q] = q == qq ? 1.0 : 0.0;
- colors[q][3] += qvec[q];
- }
- for (uniform int p = 0; p < 3; p++)
- {
- float value = half_pixels[16 * p + kk];
- for (uniform int q = 0; q < 4; q++)
- {
- colors[q][p] += value * qvec[q];
- colors[q][4 + p] += sq(value) * qvec[q];
- }
- }
- }
-
- float base_err = 0;
- for (uniform int q = 0; q < 4; q++)
- {
- if (colors[q][3] > 0)
- for (uniform int p = 0; p < 3; p++)
- {
- colors[q][7 + p] = colors[q][p] / colors[q][3];
- base_err += colors[q][4 + p] - sq(colors[q][7 + p])*colors[q][3];
- }
- }
- for (uniform int table_level = 0; table_level < 8; table_level++)
- {
- float center[3];
- int qcenter[3];
-
- for (uniform int p = 0; p < 3; p++)
- {
- center[p] = optimize_center(colors, p, table_level);
- }
-
- center_quant_dequant(qcenter, center, state->diff, state->prev_qcenter);
-
- float err = base_err;
- for (uniform int q = 0; q < 4; q++)
- {
- int dY = get_etc1_dY(table_level, q);
- for (uniform int p = 0; p < 3; p++)
- err += sq(clamp(center[p] + dY, 0, 255) - colors[q][7 + p])*colors[q][3];
- }
-
- if (err < best_error)
- {
- best_error = err;
- best_table = table_level;
- best_qbits = qbits;
- for (uniform int p = 0; p < 3; p++) best_qcenter[p] = qcenter[p];
- }
- }
- }
- out_table[0] = best_table;
- out_qbits[0] = best_qbits;
- for (uniform int p = 0; p < 3; p++) out_qcenter[p] = best_qcenter[p];
- return best_error;
- }
- float compress_etc1_half(uint32 qbits[1], int table[1], int qcenter[3], float half_pixels[], etc_enc_state state[])
- {
- float err = compress_etc1_half_7(qbits, table, qcenter, half_pixels, state);
- for (uniform int p = 0; p < 3; p++)
- state->prev_qcenter[p] = qcenter[p];
- return err;
- }
- //////////////////////////
- // ETC1 core
- inline uint32 bswap32(uint32 v)
- {
- uint32 r = 0;
- r += ((v >> 24) & 255) << 0;
- r += ((v >> 16) & 255) << 8;
- r += ((v >> 8) & 255) << 16;
- r += ((v >> 0) & 255) << 24;
- return r;
- }
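- // Assembles the 64-bit ETC1 block: base colors (4:4:4 or 5:5:5 plus 3-bit
- // deltas), flip and diff flags, the two table indices, then the index
- // bit-planes; non-flipped blocks have their per-texel bits transposed into
- // the column-major order the format expects before the final byte swap.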
- void etc_pack(uint32 data[], uint32 qbits[2], int tables[2], int qcenters[2][3], uniform int diff, uniform int flip)
- {
- for (uniform int k = 0; k < 2; k++) data[k] = 0;
- uniform int pos = 0;
- if (diff == 0)
- {
- put_bits(data, &pos, 4, qcenters[1][0]);
- put_bits(data, &pos, 4, qcenters[0][0]);
- put_bits(data, &pos, 4, qcenters[1][1]);
- put_bits(data, &pos, 4, qcenters[0][1]);
- put_bits(data, &pos, 4, qcenters[1][2]);
- put_bits(data, &pos, 4, qcenters[0][2]);
- }
- else
- {
- put_bits(data, &pos, 3, (qcenters[1][0] - qcenters[0][0]) & 7);
- put_bits(data, &pos, 5, qcenters[0][0]);
- put_bits(data, &pos, 3, (qcenters[1][1] - qcenters[0][1]) & 7);
- put_bits(data, &pos, 5, qcenters[0][1]);
- put_bits(data, &pos, 3, (qcenters[1][2] - qcenters[0][2]) & 7);
- put_bits(data, &pos, 5, qcenters[0][2]);
- }
- put_bits(data, &pos, 1, flip);
- put_bits(data, &pos, 1, diff);
- put_bits(data, &pos, 3, tables[1]);
- put_bits(data, &pos, 3, tables[0]);
- uint32 all_qbits_flipped = (qbits[1] << 2) | qbits[0];
- uint32 all_qbits = 0;
- if (flip != 0) all_qbits = all_qbits_flipped;
- if (flip == 0)
- for (uniform int k = 0; k < 2; k++)
- for (uniform int y = 0; y < 4; y++)
- for (uniform int x = 0; x < 4; x++)
- {
- int bit = (all_qbits_flipped >> (k * 16 + x * 4 + y)) & 1;
- all_qbits += bit << (k * 16 + y * 4 + x);
- }
- data[1] = bswap32(all_qbits);
- }
- inline void CompressBlockETC1_core(etc_enc_state state[])
- {
- float flipped_block[48];
- for (uniform int y = 0; y < 4; y++)
- for (uniform int x = 0; x < 4; x++)
- for (uniform int p = 0; p < 3; p++)
- {
- flipped_block[16 * p + x * 4 + y] = state->block[16 * p + y * 4 + x];
- }
- for (uniform int flip = 0; flip < 2; flip++)
- for (uniform int diff = 1; diff >= 0; diff--)
- {
- state->diff = diff == 1;
- state->prev_qcenter[0] = -1;
- varying float * uniform pixels = state->block;
- if (flip == 0) pixels = flipped_block;
- uint32 qbits[2];
- int tables[2];
- int qcenters[2][3];
- float err = 0;
- err += compress_etc1_half(&qbits[0], &tables[0], qcenters[0], &pixels[0], state);
- err += compress_etc1_half(&qbits[1], &tables[1], qcenters[1], &pixels[8], state);
- if (err < state->best_err)
- {
- state->best_err = err;
- etc_pack(state->best_data, qbits, tables, qcenters, diff, flip);
- }
- }
- }
- void etc_enc_copy_settings(etc_enc_state state[], uniform etc_enc_settings settings[])
- {
- state->fastSkipTreshold = settings->fastSkipTreshold;
- }
- inline void CompressBlockETC1(uniform rgba_surface src[], int xx, uniform int yy, uniform uint8 dst[], uniform etc_enc_settings settings[])
- {
- etc_enc_state _state;
- varying etc_enc_state* uniform state = &_state;
- etc_enc_copy_settings(state, settings);
- load_block_interleaved(state->block, src, xx, yy);
- state->best_err = FLT_MAX;
- CompressBlockETC1_core(state);
- store_data(dst, src->width, xx, yy, state->best_data, 2);
- }
- export void CompressBlocksETC1_ispc(uniform rgba_surface src[], uniform uint8 dst[], uniform etc_enc_settings settings[])
- {
- const int h_4=src->height/4, w_4=src->width/4;
- for (uniform int yy = 0; yy<h_4; yy++)
- for (uniform int xx = 0; xx<w_4; xx++) // foreach (xx = 0 ... src->width/4)
- {
- CompressBlockETC1(src, xx, yy, dst, settings);
- }
- }
- } // namespace ispc ESENTHEL CHANGED