// ProcessRGB.cpp

#include <array>
#include <string.h>
#include <limits>
#ifdef __ARM_NEON
#  include <arm_neon.h>
#endif

#include "Dither.hpp"
#include "ForceInline.hpp"
#include "Math.hpp"
#include "ProcessCommon.hpp"
#include "ProcessRGB.hpp"
#include "Tables.hpp"
#include "Vector.hpp"

#if defined __SSE4_1__ || defined __AVX2__ || defined _MSC_VER
#  ifdef _MSC_VER
#    include <intrin.h>
#    include <Windows.h>
#    define _bswap(x) _byteswap_ulong(x)
#    define _bswap64(x) _byteswap_uint64(x)
#  else
#    include <x86intrin.h>
#  endif
#endif

#ifndef _bswap
#  define _bswap(x) __builtin_bswap32(x)
#  define _bswap64(x) __builtin_bswap64(x)
#endif

static const uint32_t MaxError = 1065369600; // ((38+76+14) * 255)^2

// common T-/H-mode table
static uint8_t tableTH[8] = { 3, 6, 11, 16, 23, 32, 41, 64 };

// thresholds for the early compression-mode decision scheme
// default: 0.03, 0.09, and 0.38
float ecmd_threshold[3] = { 0.03f, 0.09f, 0.38f };

static const uint8_t ModeUndecided = 0;
static const uint8_t ModePlanar = 0x1;
static const uint8_t ModeTH = 0x2;

const unsigned int R = 2;
const unsigned int G = 1;
const unsigned int B = 0;
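// The channel indices above are used throughout the T-/H-mode helpers below to
// address the uint8_t[2][3] color arrays (colors[i][R], colors[i][G], colors[i][B]).
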
struct Luma
{
#ifdef __AVX2__
    float max, min;
    uint8_t minIdx = 255, maxIdx = 255;
    __m128i luma8;
#elif defined __ARM_NEON && defined __aarch64__
    float max, min;
    uint8_t minIdx = 255, maxIdx = 255;
    uint8x16_t luma8;
#else
    uint8_t max = 0, min = 255, maxIdx = 0, minIdx = 0;
    uint8_t val[16];
#endif
};

#ifdef __AVX2__
struct Plane
{
    uint64_t plane;
    uint64_t error;
    __m256i sum4;
};
#endif

#if defined __AVX2__ || (defined __ARM_NEON && defined __aarch64__)
struct Channels
{
#ifdef __AVX2__
    __m128i r8, g8, b8;
#elif defined __ARM_NEON && defined __aarch64__
    uint8x16x2_t r, g, b;
#endif
};
#endif

namespace
{

static etcpak_force_inline uint8_t clamp( uint8_t min, int16_t val, uint8_t max )
{
    return val < min ? min : ( val > max ? max : val );
}

static etcpak_force_inline uint8_t clampMin( uint8_t min, int16_t val )
{
    return val < min ? min : val;
}

static etcpak_force_inline uint8_t clampMax( int16_t val, uint8_t max )
{
    return val > max ? max : val;
}

// slightly faster than std::sort
static void insertionSort( uint8_t* arr1, uint8_t* arr2 )
{
    for( uint8_t i = 1; i < 16; ++i )
    {
        uint8_t value = arr1[i];
        uint8_t hole = i;
        for( ; hole > 0 && value < arr1[hole - 1]; --hole )
        {
            arr1[hole] = arr1[hole - 1];
            arr2[hole] = arr2[hole - 1];
        }
        arr1[hole] = value;
        arr2[hole] = i;
    }
}
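// Note: arr1 is sorted ascending in place, and arr2 receives, for each sorted slot,
// the original index of the value placed there. The element that starts at slot 0
// only ever carries whatever arr2[0] already held, so the caller is presumably
// expected to seed arr2 (at least arr2[0]) with original indices beforehand.
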
// converts indices from |a0|a1|e0|e1|i0|i1|m0|m1|b0|b1|f0|f1|j0|j1|n0|n1|c0|c1|g0|g1|k0|k1|o0|o1|d0|d1|h0|h1|l0|l1|p0|p1| previously used by T- and H-modes
// into |p0|o0|n0|m0|l0|k0|j0|i0|h0|g0|f0|e0|d0|c0|b0|a0|p1|o1|n1|m1|l1|k1|j1|i1|h1|g1|f1|e1|d1|c1|b1|a1| which should be used for all modes.
// NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
static etcpak_force_inline int indexConversion( int pixelIndices )
{
    int correctIndices = 0;
    int LSB[4][4];
    int MSB[4][4];
    int shift = 0;
    for( int y = 3; y >= 0; y-- )
    {
        for( int x = 3; x >= 0; x-- )
        {
            LSB[x][y] = ( pixelIndices >> shift ) & 1;
            shift++;
            MSB[x][y] = ( pixelIndices >> shift ) & 1;
            shift++;
        }
    }
    shift = 0;
    for( int x = 0; x < 4; x++ )
    {
        for( int y = 0; y < 4; y++ )
        {
            correctIndices |= ( LSB[x][y] << shift );
            correctIndices |= ( MSB[x][y] << ( 16 + shift ) );
            shift++;
        }
    }
    return correctIndices;
}
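// In other words: the interleaved 2-bit indices are split so that all LSBs end up
// in the low 16 bits and all MSBs in the high 16 bits, ordered column-major, which
// is the pixel-index layout the rest of the encoder expects for every mode.
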
// Swapping two RGB-colors
// NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
static etcpak_force_inline void swapColors( uint8_t( colors )[2][3] )
{
    uint8_t temp = colors[0][R];
    colors[0][R] = colors[1][R];
    colors[1][R] = temp;

    temp = colors[0][G];
    colors[0][G] = colors[1][G];
    colors[1][G] = temp;

    temp = colors[0][B];
    colors[0][B] = colors[1][B];
    colors[1][B] = temp;
}

// calculates quantized colors for T or H modes
void compressColor( uint8_t( currColor )[2][3], uint8_t( quantColor )[2][3], bool t_mode )
{
    if( t_mode )
    {
        quantColor[0][R] = clampMax( 15 * ( currColor[0][R] + 8 ) / 255, 15 );
        quantColor[0][G] = clampMax( 15 * ( currColor[0][G] + 8 ) / 255, 15 );
        quantColor[0][B] = clampMax( 15 * ( currColor[0][B] + 8 ) / 255, 15 );
    }
    else // clamped to [1,14] to get a wider range
    {
        quantColor[0][R] = clamp( 1, 15 * ( currColor[0][R] + 8 ) / 255, 14 );
        quantColor[0][G] = clamp( 1, 15 * ( currColor[0][G] + 8 ) / 255, 14 );
        quantColor[0][B] = clamp( 1, 15 * ( currColor[0][B] + 8 ) / 255, 14 );
    }

    // clamped to [1,14] to get a wider range
    quantColor[1][R] = clamp( 1, 15 * ( currColor[1][R] + 8 ) / 255, 14 );
    quantColor[1][G] = clamp( 1, 15 * ( currColor[1][G] + 8 ) / 255, 14 );
    quantColor[1][B] = clamp( 1, 15 * ( currColor[1][B] + 8 ) / 255, 14 );
}
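// Quantization example: an 8-bit component of 128 maps to 15 * (128 + 8) / 255 = 8
// (RGB444); 255 maps to 15 * 263 / 255 = 15, which the [1,14] branch clamps to 14,
// presumably so that neighbouring RGB444 values can still be probed on both sides.
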
// three decoding functions come from ETCPACK v2.74 and are slightly changed.
static etcpak_force_inline void decompressColor( uint8_t( colorsRGB444 )[2][3], uint8_t( colors )[2][3] )
{
    // The color should be retrieved as:
    //
    // c = round(255/(2^r_bits-1)) * comp_color
    //
    // This is similar to bit replication.
    //
    // Note -- this code only works for bit replication from 4 bits and up --- 3 bits needs
    // two copy operations.
    colors[0][R] = ( colorsRGB444[0][R] << 4 ) | colorsRGB444[0][R];
    colors[0][G] = ( colorsRGB444[0][G] << 4 ) | colorsRGB444[0][G];
    colors[0][B] = ( colorsRGB444[0][B] << 4 ) | colorsRGB444[0][B];
    colors[1][R] = ( colorsRGB444[1][R] << 4 ) | colorsRGB444[1][R];
    colors[1][G] = ( colorsRGB444[1][G] << 4 ) | colorsRGB444[1][G];
    colors[1][B] = ( colorsRGB444[1][B] << 4 ) | colorsRGB444[1][B];
}
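// Bit-replication example: the RGB444 component 0x4 expands to (0x4 << 4) | 0x4
// = 0x44 = 68, which equals round(255/15 * 4); 4-bit values are thus stretched
// uniformly over the full 0..255 range.
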
// calculates the paint colors from the block colors
// using a distance d and one of the H- or T-patterns.
static void calculatePaintColors59T( uint8_t d, uint8_t( colors )[2][3], uint8_t( pColors )[4][3] )
{
    //////////////////////////////////////////////
    //
    //    C3      C1        C4----C1---C2
    //    |       |               |
    //    |       |               |
    //    |-------|               |
    //    |       |               |
    //    |       |               |
    //    C4      C2              C3
    //
    //////////////////////////////////////////////

    // C4
    pColors[3][R] = clampMin( 0, colors[1][R] - tableTH[d] );
    pColors[3][G] = clampMin( 0, colors[1][G] - tableTH[d] );
    pColors[3][B] = clampMin( 0, colors[1][B] - tableTH[d] );

    // C3
    pColors[0][R] = colors[0][R];
    pColors[0][G] = colors[0][G];
    pColors[0][B] = colors[0][B];

    // C2
    pColors[1][R] = clampMax( colors[1][R] + tableTH[d], 255 );
    pColors[1][G] = clampMax( colors[1][G] + tableTH[d], 255 );
    pColors[1][B] = clampMax( colors[1][B] + tableTH[d], 255 );

    // C1
    pColors[2][R] = colors[1][R];
    pColors[2][G] = colors[1][G];
    pColors[2][B] = colors[1][B];
}
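// Example (T-mode): with d = 2 (tableTH[2] = 11) and colors[1] = {200, 100, 50},
// the paint colors become C1 = {200, 100, 50}, C2 = {211, 111, 61} and
// C4 = {189, 89, 39}, while C3 is simply colors[0]; components saturate at 0 and 255.
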
static void calculatePaintColors58H( uint8_t d, uint8_t( colors )[2][3], uint8_t( pColors )[4][3] )
{
    // C4
    pColors[3][R] = clampMin( 0, colors[1][R] - tableTH[d] );
    pColors[3][G] = clampMin( 0, colors[1][G] - tableTH[d] );
    pColors[3][B] = clampMin( 0, colors[1][B] - tableTH[d] );

    // C1
    pColors[0][R] = clampMax( colors[0][R] + tableTH[d], 255 );
    pColors[0][G] = clampMax( colors[0][G] + tableTH[d], 255 );
    pColors[0][B] = clampMax( colors[0][B] + tableTH[d], 255 );

    // C2
    pColors[1][R] = clampMin( 0, colors[0][R] - tableTH[d] );
    pColors[1][G] = clampMin( 0, colors[0][G] - tableTH[d] );
    pColors[1][B] = clampMin( 0, colors[0][B] - tableTH[d] );

    // C3
    pColors[2][R] = clampMax( colors[1][R] + tableTH[d], 255 );
    pColors[2][G] = clampMax( colors[1][G] + tableTH[d], 255 );
    pColors[2][B] = clampMax( colors[1][B] + tableTH[d], 255 );
}
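// Example (H-mode): both base colors are spread symmetrically by tableTH[d], so the
// four paint colors are colors[0] +/- tableTH[d] and colors[1] +/- tableTH[d],
// clamped to the 0..255 range.
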
#if defined _MSC_VER && !defined __clang__
static etcpak_force_inline unsigned long _bit_scan_forward( unsigned long mask )
{
    unsigned long ret;
    _BitScanForward( &ret, mask );
    return ret;
}
#endif

typedef std::array<uint16_t, 4> v4i;

#ifdef __AVX2__
static etcpak_force_inline __m256i Sum4_AVX2( const uint8_t* data ) noexcept
{
    __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
    __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
    __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
    __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);

    __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));

    __m256i t0 = _mm256_cvtepu8_epi16(dm0);
    __m256i t1 = _mm256_cvtepu8_epi16(dm1);
    __m256i t2 = _mm256_cvtepu8_epi16(dm2);
    __m256i t3 = _mm256_cvtepu8_epi16(dm3);

    __m256i sum0 = _mm256_add_epi16(t0, t1);
    __m256i sum1 = _mm256_add_epi16(t2, t3);

    __m256i s0 = _mm256_permute2x128_si256(sum0, sum1, (0) | (3 << 4)); // 0, 0, 3, 3
    __m256i s1 = _mm256_permute2x128_si256(sum0, sum1, (1) | (2 << 4)); // 1, 1, 2, 2

    __m256i s2 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(1, 3, 0, 2));
    __m256i s3 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(0, 2, 1, 3));
    __m256i s4 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(3, 1, 0, 2));
    __m256i s5 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(2, 0, 1, 3));

    __m256i sum5 = _mm256_add_epi16(s2, s3); //   3,   0,   3,   0
    __m256i sum6 = _mm256_add_epi16(s4, s5); //   2,   1,   1,   2
    return _mm256_add_epi16(sum5, sum6);     // 3+2, 0+1, 3+1, 0+2
}
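// In effect, Sum4_AVX2 returns the per-channel 16-bit sums of the four half-blocks
// an ETC1 block can be split into (bottom/top 4x2 and right/left 2x4), each half
// being assembled from two of the 2x2 quadrants labelled 0-3 in the comments above;
// the alpha byte is masked out before summing.
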
static etcpak_force_inline __m256i Average_AVX2( const __m256i data ) noexcept
{
    __m256i a = _mm256_add_epi16(data, _mm256_set1_epi16(4));
    return _mm256_srli_epi16(a, 3);
}
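// (sum + 4) >> 3 is a rounded division by 8, i.e. the per-channel average over the
// eight pixels of one half-block.
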
static etcpak_force_inline __m128i CalcErrorBlock_AVX2( const __m256i data, const v4i a[8] ) noexcept
{
    //
    __m256i a0 = _mm256_load_si256((__m256i*)a[0].data());
    __m256i a1 = _mm256_load_si256((__m256i*)a[4].data());

    // err = 8 * ( sq( average[0] ) + sq( average[1] ) + sq( average[2] ) );
    __m256i a4 = _mm256_madd_epi16(a0, a0);
    __m256i a5 = _mm256_madd_epi16(a1, a1);
    __m256i a6 = _mm256_hadd_epi32(a4, a5);
    __m256i a7 = _mm256_slli_epi32(a6, 3);
    __m256i a8 = _mm256_add_epi32(a7, _mm256_set1_epi32(0x3FFFFFFF)); // Big value to prevent negative values, but small enough to prevent overflow

    // average is not swapped
    // err -= block[0] * 2 * average[0];
    // err -= block[1] * 2 * average[1];
    // err -= block[2] * 2 * average[2];
    __m256i a2 = _mm256_slli_epi16(a0, 1);
    __m256i a3 = _mm256_slli_epi16(a1, 1);
    __m256i b0 = _mm256_madd_epi16(a2, data);
    __m256i b1 = _mm256_madd_epi16(a3, data);
    __m256i b2 = _mm256_hadd_epi32(b0, b1);
    __m256i b3 = _mm256_sub_epi32(a8, b2);
    __m256i b4 = _mm256_hadd_epi32(b3, b3);
    __m256i b5 = _mm256_permutevar8x32_epi32(b4, _mm256_set_epi32(0, 0, 0, 0, 5, 1, 4, 0));
    return _mm256_castsi256_si128(b5);
}

static etcpak_force_inline void ProcessAverages_AVX2( const __m256i d, v4i a[8] ) noexcept
{
    __m256i t = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(31)), _mm256_set1_epi16(128));
    __m256i c = _mm256_srli_epi16(_mm256_add_epi16(t, _mm256_srli_epi16(t, 8)), 8);

    __m256i c1 = _mm256_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
    __m256i diff = _mm256_sub_epi16(c, c1);
    diff = _mm256_max_epi16(diff, _mm256_set1_epi16(-4));
    diff = _mm256_min_epi16(diff, _mm256_set1_epi16(3));

    __m256i co = _mm256_add_epi16(c1, diff);
    c = _mm256_blend_epi16(co, c, 0xF0);

    __m256i a0 = _mm256_or_si256(_mm256_slli_epi16(c, 3), _mm256_srli_epi16(c, 2));
    _mm256_store_si256((__m256i*)a[4].data(), a0);

    __m256i t0 = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(15)), _mm256_set1_epi16(128));
    __m256i t1 = _mm256_srli_epi16(_mm256_add_epi16(t0, _mm256_srli_epi16(t0, 8)), 8);

    __m256i t2 = _mm256_or_si256(t1, _mm256_slli_epi16(t1, 4));
    _mm256_store_si256((__m256i*)a[0].data(), t2);
}

static etcpak_force_inline uint64_t EncodeAverages_AVX2( const v4i a[8], size_t idx ) noexcept
{
    uint64_t d = ( idx << 24 );
    size_t base = idx << 1;

    __m128i a0 = _mm_load_si128((const __m128i*)a[base].data());

    __m128i r0, r1;
    if( ( idx & 0x2 ) == 0 )
    {
        r0 = _mm_srli_epi16(a0, 4);
        __m128i a1 = _mm_unpackhi_epi64(r0, r0);
        r1 = _mm_slli_epi16(a1, 4);
    }
    else
    {
        __m128i a1 = _mm_and_si128(a0, _mm_set1_epi16(-8));
        r0 = _mm_unpackhi_epi64(a1, a1);
        __m128i a2 = _mm_sub_epi16(a1, r0);
        __m128i a3 = _mm_srai_epi16(a2, 3);
        r1 = _mm_and_si128(a3, _mm_set1_epi16(0x07));
    }

    __m128i r2 = _mm_or_si128(r0, r1);
    // do missing swap for average values
    __m128i r3 = _mm_shufflelo_epi16(r2, _MM_SHUFFLE(3, 0, 1, 2));
    __m128i r4 = _mm_packus_epi16(r3, _mm_setzero_si128());
    d |= _mm_cvtsi128_si32(r4);
    return d;
}
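// EncodeAverages_AVX2 packs the two sub-block base colors selected by idx into the
// low block word: with bit 1 of idx clear they appear to be stored as two independent
// RGB444 colors, otherwise as an RGB555 base plus a 3-bit signed per-channel delta;
// idx itself is placed at bit 24 (the flip/differential flags).
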
static etcpak_force_inline uint64_t CheckSolid_AVX2( const uint8_t* src ) noexcept
{
    __m256i d0 = _mm256_loadu_si256(((__m256i*)src) + 0);
    __m256i d1 = _mm256_loadu_si256(((__m256i*)src) + 1);

    __m256i c = _mm256_broadcastd_epi32(_mm256_castsi256_si128(d0));

    __m256i c0 = _mm256_cmpeq_epi8(d0, c);
    __m256i c1 = _mm256_cmpeq_epi8(d1, c);

    __m256i m = _mm256_and_si256(c0, c1);
    if (!_mm256_testc_si256(m, _mm256_set1_epi32(-1)))
    {
        return 0;
    }
    return 0x02000000 |
        ( (unsigned int)( src[0] & 0xF8 ) << 16 ) |
        ( (unsigned int)( src[1] & 0xF8 ) << 8 ) |
        ( (unsigned int)( src[2] & 0xF8 ) );
}
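// Solid-color fast path: if all 16 RGBA pixels equal the first one, the block is
// emitted directly with the differential-mode bit set (0x02000000), the color
// truncated to 5:5:5 (& 0xF8) and all deltas/selectors left at zero. A return
// value of 0 means "not solid" and the caller falls through to the full encoder.
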
static etcpak_force_inline __m128i PrepareAverages_AVX2( v4i a[8], const uint8_t* src ) noexcept
{
    __m256i sum4 = Sum4_AVX2( src );
    ProcessAverages_AVX2( Average_AVX2( sum4 ), a );
    return CalcErrorBlock_AVX2( sum4, a );
}

static etcpak_force_inline __m128i PrepareAverages_AVX2( v4i a[8], const __m256i sum4 ) noexcept
{
    ProcessAverages_AVX2( Average_AVX2( sum4 ), a );
    return CalcErrorBlock_AVX2( sum4, a );
}

static etcpak_force_inline void FindBestFit_4x2_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data ) noexcept
{
    __m256i sel0 = _mm256_setzero_si256();
    __m256i sel1 = _mm256_setzero_si256();

    for (unsigned int j = 0; j < 2; ++j)
    {
        unsigned int bid = offset + 1 - j;
        __m256i squareErrorSum = _mm256_setzero_si256();

        __m128i a0 = _mm_loadl_epi64((const __m128i*)a[bid].data());
        __m256i a1 = _mm256_broadcastq_epi64(a0);

        // Processing one full row each iteration
        for (size_t i = 0; i < 8; i += 4)
        {
            __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));

            __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
            __m256i d = _mm256_sub_epi16(a1, rgb16);

            // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
            // This produces slightly different results, but is significantly faster
            __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
            __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
            __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
            __m128i pixel3 = _mm256_castsi256_si128(pixel2);

            __m128i pix0 = _mm_broadcastw_epi16(pixel3);
            __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
            __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

            // Processing first two pixels of the row
            {
                __m256i pix = _mm256_abs_epi16(pixel);

                // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
                // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
                __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
                __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

                __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
                __m256i minError = _mm256_min_epi16(error0, error1);

                // Exploiting symmetry of the selector table by using the sign bit
                // This produces slightly different results, but is significantly faster
                __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

                // Interleaving values so madd instruction can be used
                __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
                __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

                __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
                // Squaring the minimum error to produce correct values when adding
                __m256i squareError = _mm256_madd_epi16(minError2, minError2);

                squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);

                // Packing selector bits
                __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
                __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));

                sel0 = _mm256_or_si256(sel0, minIndexLo2);
                sel1 = _mm256_or_si256(sel1, minIndexHi2);
            }

            pixel3 = _mm256_extracti128_si256(pixel2, 1);
            pix0 = _mm_broadcastw_epi16(pixel3);
            pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
            pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

            // Processing second two pixels of the row
            {
                __m256i pix = _mm256_abs_epi16(pixel);

                // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
                // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
                __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
                __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

                __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
                __m256i minError = _mm256_min_epi16(error0, error1);

                // Exploiting symmetry of the selector table by using the sign bit
                __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

                // Interleaving values so madd instruction can be used
                __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
                __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

                __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
                // Squaring the minimum error to produce correct values when adding
                __m256i squareError = _mm256_madd_epi16(minError2, minError2);

                squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);

                // Packing selector bits
                __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
                __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));
                __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
                __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);

                sel0 = _mm256_or_si256(sel0, minIndexLo3);
                sel1 = _mm256_or_si256(sel1, minIndexHi3);
            }
        }

        data += 8 * 4;

        _mm256_store_si256((__m256i*)terr[1 - j], squareErrorSum);
    }

    // Interleave selector bits
    __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
    __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);

    __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
    __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));
    __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);

    __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);
    _mm256_store_si256((__m256i*)tsel, sel);
}
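// Both FindBestFit variants score all eight modifier tables for the two sub-blocks
// at once (4x2 layout here, 2x4 in the function below): terr[s][t] accumulates the
// squared, perceptually weighted error of sub-block s under table t, and tsel[t]
// collects the packed per-pixel selector bits chosen for table t.
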
static etcpak_force_inline void FindBestFit_2x4_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data ) noexcept
{
    __m256i sel0 = _mm256_setzero_si256();
    __m256i sel1 = _mm256_setzero_si256();

    __m256i squareErrorSum0 = _mm256_setzero_si256();
    __m256i squareErrorSum1 = _mm256_setzero_si256();

    __m128i a0 = _mm_loadl_epi64((const __m128i*)a[offset + 1].data());
    __m128i a1 = _mm_loadl_epi64((const __m128i*)a[offset + 0].data());

    __m128i a2 = _mm_broadcastq_epi64(a0);
    __m128i a3 = _mm_broadcastq_epi64(a1);
    __m256i a4 = _mm256_insertf128_si256(_mm256_castsi128_si256(a2), a3, 1);

    // Processing one full row each iteration
    for (size_t i = 0; i < 16; i += 4)
    {
        __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));

        __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
        __m256i d = _mm256_sub_epi16(a4, rgb16);

        // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
        // This produces slightly different results, but is significantly faster
        __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
        __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
        __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
        __m128i pixel3 = _mm256_castsi256_si128(pixel2);

        __m128i pix0 = _mm_broadcastw_epi16(pixel3);
        __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
        __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

        // Processing first two pixels of the row
        {
            __m256i pix = _mm256_abs_epi16(pixel);

            // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
            // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
            __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
            __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

            __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
            __m256i minError = _mm256_min_epi16(error0, error1);

            // Exploiting symmetry of the selector table by using the sign bit
            __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

            // Interleaving values so madd instruction can be used
            __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
            __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

            __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
            // Squaring the minimum error to produce correct values when adding
            __m256i squareError = _mm256_madd_epi16(minError2, minError2);

            squareErrorSum0 = _mm256_add_epi32(squareErrorSum0, squareError);

            // Packing selector bits
            __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
            __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));

            sel0 = _mm256_or_si256(sel0, minIndexLo2);
            sel1 = _mm256_or_si256(sel1, minIndexHi2);
        }

        pixel3 = _mm256_extracti128_si256(pixel2, 1);
        pix0 = _mm_broadcastw_epi16(pixel3);
        pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
        pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

        // Processing second two pixels of the row
        {
            __m256i pix = _mm256_abs_epi16(pixel);

            // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
            // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
            __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
            __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

            __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
            __m256i minError = _mm256_min_epi16(error0, error1);

            // Exploiting symmetry of the selector table by using the sign bit
            __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

            // Interleaving values so madd instruction can be used
            __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
            __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

            __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
            // Squaring the minimum error to produce correct values when adding
            __m256i squareError = _mm256_madd_epi16(minError2, minError2);

            squareErrorSum1 = _mm256_add_epi32(squareErrorSum1, squareError);

            // Packing selector bits
            __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
            __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));
            __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
            __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);

            sel0 = _mm256_or_si256(sel0, minIndexLo3);
            sel1 = _mm256_or_si256(sel1, minIndexHi3);
        }
    }

    _mm256_store_si256((__m256i*)terr[1], squareErrorSum0);
    _mm256_store_si256((__m256i*)terr[0], squareErrorSum1);

    // Interleave selector bits
    __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
    __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);

    __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
    __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));
    __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);

    __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);
    _mm256_store_si256((__m256i*)tsel, sel);
}
static etcpak_force_inline uint64_t EncodeSelectors_AVX2( uint64_t d, const uint32_t terr[2][8], const uint32_t tsel[8], const bool rotate ) noexcept
{
    size_t tidx[2];

    // Get index of minimum error (terr[0] and terr[1])
    __m256i err0 = _mm256_load_si256((const __m256i*)terr[0]);
    __m256i err1 = _mm256_load_si256((const __m256i*)terr[1]);

    __m256i errLo = _mm256_permute2x128_si256(err0, err1, (0) | (2 << 4));
    __m256i errHi = _mm256_permute2x128_si256(err0, err1, (1) | (3 << 4));

    __m256i errMin0 = _mm256_min_epu32(errLo, errHi);

    __m256i errMin1 = _mm256_shuffle_epi32(errMin0, _MM_SHUFFLE(2, 3, 0, 1));
    __m256i errMin2 = _mm256_min_epu32(errMin0, errMin1);

    __m256i errMin3 = _mm256_shuffle_epi32(errMin2, _MM_SHUFFLE(1, 0, 3, 2));
    __m256i errMin4 = _mm256_min_epu32(errMin3, errMin2);

    __m256i errMin5 = _mm256_permute2x128_si256(errMin4, errMin4, (0) | (0 << 4));
    __m256i errMin6 = _mm256_permute2x128_si256(errMin4, errMin4, (1) | (1 << 4));

    __m256i errMask0 = _mm256_cmpeq_epi32(errMin5, err0);
    __m256i errMask1 = _mm256_cmpeq_epi32(errMin6, err1);

    uint32_t mask0 = _mm256_movemask_epi8(errMask0);
    uint32_t mask1 = _mm256_movemask_epi8(errMask1);

    tidx[0] = _bit_scan_forward(mask0) >> 2;
    tidx[1] = _bit_scan_forward(mask1) >> 2;

    d |= tidx[0] << 26;
    d |= tidx[1] << 29;

    unsigned int t0 = tsel[tidx[0]];
    unsigned int t1 = tsel[tidx[1]];

    if (!rotate)
    {
        t0 &= 0xFF00FF00;
        t1 &= 0x00FF00FF;
    }
    else
    {
        t0 &= 0xCCCCCCCC;
        t1 &= 0x33333333;
    }

    // Flip selectors from sign bit
    unsigned int t2 = (t0 | t1) ^ 0xFFFF0000;

    return d | static_cast<uint64_t>(_bswap(t2)) << 32;
}
static etcpak_force_inline __m128i r6g7b6_AVX2( __m128 cof, __m128 chf, __m128 cvf ) noexcept
{
    __m128i co = _mm_cvttps_epi32(cof);
    __m128i ch = _mm_cvttps_epi32(chf);
    __m128i cv = _mm_cvttps_epi32(cvf);

    __m128i coh = _mm_packus_epi32(co, ch);
    __m128i cv0 = _mm_packus_epi32(cv, _mm_setzero_si128());
    __m256i cohv0 = _mm256_inserti128_si256(_mm256_castsi128_si256(coh), cv0, 1);
    __m256i cohv1 = _mm256_min_epu16(cohv0, _mm256_set1_epi16(1023));

    __m256i cohv2 = _mm256_sub_epi16(cohv1, _mm256_set1_epi16(15));
    __m256i cohv3 = _mm256_srai_epi16(cohv2, 1);

    __m256i cohvrb0 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(11));
    __m256i cohvrb1 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(4));
    __m256i cohvg0 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(9));
    __m256i cohvg1 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(6));

    __m256i cohvrb2 = _mm256_srai_epi16(cohvrb0, 7);
    __m256i cohvrb3 = _mm256_srai_epi16(cohvrb1, 7);
    __m256i cohvg2 = _mm256_srai_epi16(cohvg0, 8);
    __m256i cohvg3 = _mm256_srai_epi16(cohvg1, 8);

    __m256i cohvrb4 = _mm256_sub_epi16(cohvrb0, cohvrb2);
    __m256i cohvrb5 = _mm256_sub_epi16(cohvrb4, cohvrb3);
    __m256i cohvg4 = _mm256_sub_epi16(cohvg0, cohvg2);
    __m256i cohvg5 = _mm256_sub_epi16(cohvg4, cohvg3);

    __m256i cohvrb6 = _mm256_srai_epi16(cohvrb5, 3);
    __m256i cohvg6 = _mm256_srai_epi16(cohvg5, 2);

    __m256i cohv4 = _mm256_blend_epi16(cohvg6, cohvrb6, 0x55);

    __m128i cohv5 = _mm_packus_epi16(_mm256_castsi256_si128(cohv4), _mm256_extracti128_si256(cohv4, 1));
    return _mm_shuffle_epi8(cohv5, _mm_setr_epi8(6, 5, 4, -1, 2, 1, 0, -1, 10, 9, 8, -1, -1, -1, -1, -1));
}
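// r6g7b6_AVX2 rounds the planar-mode origin (O), horizontal (H) and vertical (V)
// colors from their scaled floating-point estimates down to the RGB 6:7:6 precision
// that planar mode stores, returning the three quantized colors packed into a
// single 128-bit register.
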
static etcpak_force_inline Plane Planar_AVX2( const Channels& ch, uint8_t& mode, bool useHeuristics )
{
    __m128i t0 = _mm_sad_epu8( ch.r8, _mm_setzero_si128() );
    __m128i t1 = _mm_sad_epu8( ch.g8, _mm_setzero_si128() );
    __m128i t2 = _mm_sad_epu8( ch.b8, _mm_setzero_si128() );

    __m128i r8s = _mm_shuffle_epi8( ch.r8, _mm_set_epi8( 0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0 ) );
    __m128i g8s = _mm_shuffle_epi8( ch.g8, _mm_set_epi8( 0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0 ) );
    __m128i b8s = _mm_shuffle_epi8( ch.b8, _mm_set_epi8( 0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0 ) );

    __m128i s0 = _mm_sad_epu8( r8s, _mm_setzero_si128() );
    __m128i s1 = _mm_sad_epu8( g8s, _mm_setzero_si128() );
    __m128i s2 = _mm_sad_epu8( b8s, _mm_setzero_si128() );

    __m256i sr0 = _mm256_insertf128_si256( _mm256_castsi128_si256( t0 ), s0, 1 );
    __m256i sg0 = _mm256_insertf128_si256( _mm256_castsi128_si256( t1 ), s1, 1 );
    __m256i sb0 = _mm256_insertf128_si256( _mm256_castsi128_si256( t2 ), s2, 1 );

    __m256i sr1 = _mm256_slli_epi64( sr0, 32 );
    __m256i sg1 = _mm256_slli_epi64( sg0, 16 );

    __m256i srb = _mm256_or_si256( sr1, sb0 );
    __m256i srgb = _mm256_or_si256( srb, sg1 );

    if( mode != ModePlanar && useHeuristics )
    {
        Plane plane;
        plane.sum4 = _mm256_permute4x64_epi64( srgb, _MM_SHUFFLE( 2, 3, 0, 1 ) );
        return plane;
    }
    __m128i t3 = _mm_castps_si128( _mm_shuffle_ps( _mm_castsi128_ps( t0 ), _mm_castsi128_ps( t1 ), _MM_SHUFFLE( 2, 0, 2, 0 ) ) );
    __m128i t4 = _mm_shuffle_epi32( t2, _MM_SHUFFLE( 3, 1, 2, 0 ) );
    __m128i t5 = _mm_hadd_epi32( t3, t4 );
    __m128i t6 = _mm_shuffle_epi32( t5, _MM_SHUFFLE( 1, 1, 1, 1 ) );
    __m128i t7 = _mm_shuffle_epi32( t5, _MM_SHUFFLE( 2, 2, 2, 2 ) );

    __m256i sr = _mm256_broadcastw_epi16( t5 );
    __m256i sg = _mm256_broadcastw_epi16( t6 );
    __m256i sb = _mm256_broadcastw_epi16( t7 );

    __m256i r08 = _mm256_cvtepu8_epi16( ch.r8 );
    __m256i g08 = _mm256_cvtepu8_epi16( ch.g8 );
    __m256i b08 = _mm256_cvtepu8_epi16( ch.b8 );

    __m256i r16 = _mm256_slli_epi16( r08, 4 );
    __m256i g16 = _mm256_slli_epi16( g08, 4 );
    __m256i b16 = _mm256_slli_epi16( b08, 4 );

    __m256i difR0 = _mm256_sub_epi16( r16, sr );
    __m256i difG0 = _mm256_sub_epi16( g16, sg );
    __m256i difB0 = _mm256_sub_epi16( b16, sb );

    __m256i difRyz = _mm256_madd_epi16( difR0, _mm256_set_epi16( 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255 ) );
    __m256i difGyz = _mm256_madd_epi16( difG0, _mm256_set_epi16( 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255 ) );
    __m256i difByz = _mm256_madd_epi16( difB0, _mm256_set_epi16( 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255 ) );

    __m256i difRxz = _mm256_madd_epi16( difR0, _mm256_set_epi16( 255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255 ) );
    __m256i difGxz = _mm256_madd_epi16( difG0, _mm256_set_epi16( 255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255 ) );
    __m256i difBxz = _mm256_madd_epi16( difB0, _mm256_set_epi16( 255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255 ) );

    __m256i difRGyz = _mm256_hadd_epi32( difRyz, difGyz );
    __m256i difByzxz = _mm256_hadd_epi32( difByz, difBxz );

    __m256i difRGxz = _mm256_hadd_epi32( difRxz, difGxz );

    __m128i sumRGyz = _mm_add_epi32( _mm256_castsi256_si128( difRGyz ), _mm256_extracti128_si256( difRGyz, 1 ) );
    __m128i sumByzxz = _mm_add_epi32( _mm256_castsi256_si128( difByzxz ), _mm256_extracti128_si256( difByzxz, 1 ) );
    __m128i sumRGxz = _mm_add_epi32( _mm256_castsi256_si128( difRGxz ), _mm256_extracti128_si256( difRGxz, 1 ) );

    __m128i sumRGByz = _mm_hadd_epi32( sumRGyz, sumByzxz );
    __m128i sumRGByzxz = _mm_hadd_epi32( sumRGxz, sumByzxz );

    __m128i sumRGBxz = _mm_shuffle_epi32( sumRGByzxz, _MM_SHUFFLE( 2, 3, 1, 0 ) );

    __m128 sumRGByzf = _mm_cvtepi32_ps( sumRGByz );
    __m128 sumRGBxzf = _mm_cvtepi32_ps( sumRGBxz );

    const float value = ( 255 * 255 * 8.0f + 85 * 85 * 8.0f ) * 16.0f;
    __m128 scale = _mm_set1_ps( -4.0f / value );

    __m128 af = _mm_mul_ps( sumRGBxzf, scale );
    __m128 bf = _mm_mul_ps( sumRGByzf, scale );

    __m128 df = _mm_mul_ps( _mm_cvtepi32_ps( t5 ), _mm_set1_ps( 4.0f / 16.0f ) );

    // calculating the three colors RGBO, RGBH, and RGBV. RGB = df - af * x - bf * y;
    __m128 cof0 = _mm_fnmadd_ps( af, _mm_set1_ps( -255.0f ), _mm_fnmadd_ps( bf, _mm_set1_ps( -255.0f ), df ) );
    __m128 chf0 = _mm_fnmadd_ps( af, _mm_set1_ps( 425.0f ), _mm_fnmadd_ps( bf, _mm_set1_ps( -255.0f ), df ) );
    __m128 cvf0 = _mm_fnmadd_ps( af, _mm_set1_ps( -255.0f ), _mm_fnmadd_ps( bf, _mm_set1_ps( 425.0f ), df ) );

    // convert to r6g7b6
    __m128i cohv = r6g7b6_AVX2( cof0, chf0, cvf0 );

    uint64_t rgbho = _mm_extract_epi64( cohv, 0 );
    uint32_t rgbv0 = _mm_extract_epi32( cohv, 2 );
    // Error calculation
    uint64_t error = 0;
    if( !useHeuristics )
    {
        auto ro0 = ( rgbho >> 48 ) & 0x3F;
        auto go0 = ( rgbho >> 40 ) & 0x7F;
        auto bo0 = ( rgbho >> 32 ) & 0x3F;
        auto ro1 = ( ro0 >> 4 ) | ( ro0 << 2 );
        auto go1 = ( go0 >> 6 ) | ( go0 << 1 );
        auto bo1 = ( bo0 >> 4 ) | ( bo0 << 2 );
        auto ro2 = ( ro1 << 2 ) + 2;
        auto go2 = ( go1 << 2 ) + 2;
        auto bo2 = ( bo1 << 2 ) + 2;

        __m256i ro3 = _mm256_set1_epi16( ro2 );
        __m256i go3 = _mm256_set1_epi16( go2 );
        __m256i bo3 = _mm256_set1_epi16( bo2 );

        auto rh0 = ( rgbho >> 16 ) & 0x3F;
        auto gh0 = ( rgbho >> 8 ) & 0x7F;
        auto bh0 = ( rgbho >> 0 ) & 0x3F;
        auto rh1 = ( rh0 >> 4 ) | ( rh0 << 2 );
        auto gh1 = ( gh0 >> 6 ) | ( gh0 << 1 );
        auto bh1 = ( bh0 >> 4 ) | ( bh0 << 2 );

        auto rh2 = rh1 - ro1;
        auto gh2 = gh1 - go1;
        auto bh2 = bh1 - bo1;

        __m256i rh3 = _mm256_set1_epi16( rh2 );
        __m256i gh3 = _mm256_set1_epi16( gh2 );
        __m256i bh3 = _mm256_set1_epi16( bh2 );

        auto rv0 = ( rgbv0 >> 16 ) & 0x3F;
        auto gv0 = ( rgbv0 >> 8 ) & 0x7F;
        auto bv0 = ( rgbv0 >> 0 ) & 0x3F;
        auto rv1 = ( rv0 >> 4 ) | ( rv0 << 2 );
        auto gv1 = ( gv0 >> 6 ) | ( gv0 << 1 );
        auto bv1 = ( bv0 >> 4 ) | ( bv0 << 2 );

        auto rv2 = rv1 - ro1;
        auto gv2 = gv1 - go1;
        auto bv2 = bv1 - bo1;

        __m256i rv3 = _mm256_set1_epi16( rv2 );
        __m256i gv3 = _mm256_set1_epi16( gv2 );
        __m256i bv3 = _mm256_set1_epi16( bv2 );

        __m256i x = _mm256_set_epi16( 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 );

        __m256i rh4 = _mm256_mullo_epi16( rh3, x );
        __m256i gh4 = _mm256_mullo_epi16( gh3, x );
        __m256i bh4 = _mm256_mullo_epi16( bh3, x );

        __m256i y = _mm256_set_epi16( 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0 );

        __m256i rv4 = _mm256_mullo_epi16( rv3, y );
        __m256i gv4 = _mm256_mullo_epi16( gv3, y );
        __m256i bv4 = _mm256_mullo_epi16( bv3, y );

        __m256i rxy = _mm256_add_epi16( rh4, rv4 );
        __m256i gxy = _mm256_add_epi16( gh4, gv4 );
        __m256i bxy = _mm256_add_epi16( bh4, bv4 );

        __m256i rp0 = _mm256_add_epi16( rxy, ro3 );
        __m256i gp0 = _mm256_add_epi16( gxy, go3 );
        __m256i bp0 = _mm256_add_epi16( bxy, bo3 );

        __m256i rp1 = _mm256_srai_epi16( rp0, 2 );
        __m256i gp1 = _mm256_srai_epi16( gp0, 2 );
        __m256i bp1 = _mm256_srai_epi16( bp0, 2 );

        __m256i rp2 = _mm256_max_epi16( _mm256_min_epi16( rp1, _mm256_set1_epi16( 255 ) ), _mm256_setzero_si256() );
        __m256i gp2 = _mm256_max_epi16( _mm256_min_epi16( gp1, _mm256_set1_epi16( 255 ) ), _mm256_setzero_si256() );
        __m256i bp2 = _mm256_max_epi16( _mm256_min_epi16( bp1, _mm256_set1_epi16( 255 ) ), _mm256_setzero_si256() );

        __m256i rdif = _mm256_sub_epi16( r08, rp2 );
        __m256i gdif = _mm256_sub_epi16( g08, gp2 );
        __m256i bdif = _mm256_sub_epi16( b08, bp2 );

        __m256i rerr = _mm256_mullo_epi16( rdif, _mm256_set1_epi16( 38 ) );
        __m256i gerr = _mm256_mullo_epi16( gdif, _mm256_set1_epi16( 76 ) );
        __m256i berr = _mm256_mullo_epi16( bdif, _mm256_set1_epi16( 14 ) );

        __m256i sum0 = _mm256_add_epi16( rerr, gerr );
        __m256i sum1 = _mm256_add_epi16( sum0, berr );

        __m256i sum2 = _mm256_madd_epi16( sum1, sum1 );
        __m128i sum3 = _mm_add_epi32( _mm256_castsi256_si128( sum2 ), _mm256_extracti128_si256( sum2, 1 ) );

        uint32_t err0 = _mm_extract_epi32( sum3, 0 );
        uint32_t err1 = _mm_extract_epi32( sum3, 1 );
        uint32_t err2 = _mm_extract_epi32( sum3, 2 );
        uint32_t err3 = _mm_extract_epi32( sum3, 3 );

        error = err0 + err1 + err2 + err3;
    }
  774. /**/
  775. uint32_t rgbv = ( rgbv0 & 0x3F ) | ( ( rgbv0 >> 2 ) & 0x1FC0 ) | ( ( rgbv0 >> 3 ) & 0x7E000 );
  776. uint64_t rgbho0_ = ( rgbho & 0x3F0000003F ) | ( ( rgbho >> 2 ) & 0x1FC000001FC0 ) | ( ( rgbho >> 3 ) & 0x7E0000007E000 );
  777. uint64_t rgbho0 = ( rgbho0_ & 0x7FFFF ) | ( ( rgbho0_ >> 13 ) & 0x3FFFF80000 );
  778. uint32_t hi = rgbv | ((rgbho0 & 0x1FFF) << 19);
  779. rgbho0 >>= 13;
  780. uint32_t lo = ( rgbho0 & 0x1 ) | ( ( rgbho0 & 0x1FE ) << 1 ) | ( ( rgbho0 & 0x600 ) << 2 ) | ( ( rgbho0 & 0x3F800 ) << 5 ) | ( ( rgbho0 & 0x1FC0000 ) << 6 );
  781. uint32_t idx = ( ( rgbho >> 33 ) & 0xF ) | ( ( rgbho >> 41 ) & 0x10 ) | ( ( rgbho >> 48 ) & 0x20 );
  782. lo |= g_flags[idx];
  783. uint64_t result = static_cast<uint32_t>(_bswap(lo));
  784. result |= static_cast<uint64_t>(static_cast<uint32_t>(_bswap(hi))) << 32;
  785. Plane plane;
  786. plane.plane = result;
  787. if( useHeuristics )
  788. {
  789. plane.error = 0;
  790. mode = ModePlanar;
  791. }
  792. else
  793. {
  794. plane.error = error;
  795. }
  796. plane.sum4 = _mm256_permute4x64_epi64(srgb, _MM_SHUFFLE(2, 3, 0, 1));
  797. return plane;
  798. }
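// EncodeSelectors_AVX2: for each half-block error table, finds the modifier-table index with
// the lowest accumulated error (horizontal min, compare, movemask, bit scan). If the combined
// best error is not below the planar error passed in, the already-encoded planar `value` is
// returned unchanged; otherwise the table indices and flipped selector bits are packed into
// the block.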
  799. static etcpak_force_inline uint64_t EncodeSelectors_AVX2( uint64_t d, const uint32_t terr[2][8], const uint32_t tsel[8], const bool rotate, const uint64_t value, const uint32_t error) noexcept
  800. {
  801. size_t tidx[2];
  802. // Get index of minimum error (terr[0] and terr[1])
  803. __m256i err0 = _mm256_load_si256((const __m256i*)terr[0]);
  804. __m256i err1 = _mm256_load_si256((const __m256i*)terr[1]);
  805. __m256i errLo = _mm256_permute2x128_si256(err0, err1, (0) | (2 << 4));
  806. __m256i errHi = _mm256_permute2x128_si256(err0, err1, (1) | (3 << 4));
  807. __m256i errMin0 = _mm256_min_epu32(errLo, errHi);
  808. __m256i errMin1 = _mm256_shuffle_epi32(errMin0, _MM_SHUFFLE(2, 3, 0, 1));
  809. __m256i errMin2 = _mm256_min_epu32(errMin0, errMin1);
  810. __m256i errMin3 = _mm256_shuffle_epi32(errMin2, _MM_SHUFFLE(1, 0, 3, 2));
  811. __m256i errMin4 = _mm256_min_epu32(errMin3, errMin2);
  812. __m256i errMin5 = _mm256_permute2x128_si256(errMin4, errMin4, (0) | (0 << 4));
  813. __m256i errMin6 = _mm256_permute2x128_si256(errMin4, errMin4, (1) | (1 << 4));
  814. __m256i errMask0 = _mm256_cmpeq_epi32(errMin5, err0);
  815. __m256i errMask1 = _mm256_cmpeq_epi32(errMin6, err1);
  816. uint32_t mask0 = _mm256_movemask_epi8(errMask0);
  817. uint32_t mask1 = _mm256_movemask_epi8(errMask1);
  818. tidx[0] = _bit_scan_forward(mask0) >> 2;
  819. tidx[1] = _bit_scan_forward(mask1) >> 2;
  820. if ((terr[0][tidx[0]] + terr[1][tidx[1]]) >= error)
  821. {
  822. return value;
  823. }
  824. d |= tidx[0] << 26;
  825. d |= tidx[1] << 29;
  826. unsigned int t0 = tsel[tidx[0]];
  827. unsigned int t1 = tsel[tidx[1]];
  828. if (!rotate)
  829. {
  830. t0 &= 0xFF00FF00;
  831. t1 &= 0x00FF00FF;
  832. }
  833. else
  834. {
  835. t0 &= 0xCCCCCCCC;
  836. t1 &= 0x33333333;
  837. }
  838. // Flip selectors based on the sign bit

  839. unsigned int t2 = (t0 | t1) ^ 0xFFFF0000;
  840. return d | static_cast<uint64_t>(_bswap(t2)) << 32;
  841. }
  842. #endif
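// Average: computes the rounded per-channel averages of the four 2x4 / 4x2 half-blocks of a
// 4x4 BGRA block (both split orientations), each as (sum of 8 pixels + 4) / 8, and stores them
// as { r, g, b, 0 }.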
  843. static etcpak_force_inline void Average( const uint8_t* data, v4i* a )
  844. {
  845. #ifdef __SSE4_1__
  846. __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
  847. __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
  848. __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
  849. __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
  850. __m128i d0l = _mm_unpacklo_epi8(d0, _mm_setzero_si128());
  851. __m128i d0h = _mm_unpackhi_epi8(d0, _mm_setzero_si128());
  852. __m128i d1l = _mm_unpacklo_epi8(d1, _mm_setzero_si128());
  853. __m128i d1h = _mm_unpackhi_epi8(d1, _mm_setzero_si128());
  854. __m128i d2l = _mm_unpacklo_epi8(d2, _mm_setzero_si128());
  855. __m128i d2h = _mm_unpackhi_epi8(d2, _mm_setzero_si128());
  856. __m128i d3l = _mm_unpacklo_epi8(d3, _mm_setzero_si128());
  857. __m128i d3h = _mm_unpackhi_epi8(d3, _mm_setzero_si128());
  858. __m128i sum0 = _mm_add_epi16(d0l, d1l);
  859. __m128i sum1 = _mm_add_epi16(d0h, d1h);
  860. __m128i sum2 = _mm_add_epi16(d2l, d3l);
  861. __m128i sum3 = _mm_add_epi16(d2h, d3h);
  862. __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
  863. __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
  864. __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
  865. __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
  866. __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
  867. __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
  868. __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
  869. __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());
  870. __m128i b0 = _mm_add_epi32(sum0l, sum0h);
  871. __m128i b1 = _mm_add_epi32(sum1l, sum1h);
  872. __m128i b2 = _mm_add_epi32(sum2l, sum2h);
  873. __m128i b3 = _mm_add_epi32(sum3l, sum3h);
  874. __m128i a0 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b2, b3), _mm_set1_epi32(4)), 3);
  875. __m128i a1 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b1), _mm_set1_epi32(4)), 3);
  876. __m128i a2 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b1, b3), _mm_set1_epi32(4)), 3);
  877. __m128i a3 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b2), _mm_set1_epi32(4)), 3);
  878. _mm_storeu_si128((__m128i*)&a[0], _mm_packus_epi32(_mm_shuffle_epi32(a0, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a1, _MM_SHUFFLE(3, 0, 1, 2))));
  879. _mm_storeu_si128((__m128i*)&a[2], _mm_packus_epi32(_mm_shuffle_epi32(a2, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a3, _MM_SHUFFLE(3, 0, 1, 2))));
  880. #elif defined __ARM_NEON
  881. uint8x16x2_t t0 = vzipq_u8(vld1q_u8(data + 0), uint8x16_t());
  882. uint8x16x2_t t1 = vzipq_u8(vld1q_u8(data + 16), uint8x16_t());
  883. uint8x16x2_t t2 = vzipq_u8(vld1q_u8(data + 32), uint8x16_t());
  884. uint8x16x2_t t3 = vzipq_u8(vld1q_u8(data + 48), uint8x16_t());
  885. uint16x8x2_t d0 = { vreinterpretq_u16_u8(t0.val[0]), vreinterpretq_u16_u8(t0.val[1]) };
  886. uint16x8x2_t d1 = { vreinterpretq_u16_u8(t1.val[0]), vreinterpretq_u16_u8(t1.val[1]) };
  887. uint16x8x2_t d2 = { vreinterpretq_u16_u8(t2.val[0]), vreinterpretq_u16_u8(t2.val[1]) };
  888. uint16x8x2_t d3 = { vreinterpretq_u16_u8(t3.val[0]), vreinterpretq_u16_u8(t3.val[1]) };
  889. uint16x8x2_t s0 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[0] ), vreinterpretq_s16_u16( d1.val[0] ) ) ), uint16x8_t());
  890. uint16x8x2_t s1 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[1] ), vreinterpretq_s16_u16( d1.val[1] ) ) ), uint16x8_t());
  891. uint16x8x2_t s2 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[0] ), vreinterpretq_s16_u16( d3.val[0] ) ) ), uint16x8_t());
  892. uint16x8x2_t s3 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[1] ), vreinterpretq_s16_u16( d3.val[1] ) ) ), uint16x8_t());
  893. uint32x4x2_t sum0 = { vreinterpretq_u32_u16(s0.val[0]), vreinterpretq_u32_u16(s0.val[1]) };
  894. uint32x4x2_t sum1 = { vreinterpretq_u32_u16(s1.val[0]), vreinterpretq_u32_u16(s1.val[1]) };
  895. uint32x4x2_t sum2 = { vreinterpretq_u32_u16(s2.val[0]), vreinterpretq_u32_u16(s2.val[1]) };
  896. uint32x4x2_t sum3 = { vreinterpretq_u32_u16(s3.val[0]), vreinterpretq_u32_u16(s3.val[1]) };
  897. uint32x4_t b0 = vaddq_u32(sum0.val[0], sum0.val[1]);
  898. uint32x4_t b1 = vaddq_u32(sum1.val[0], sum1.val[1]);
  899. uint32x4_t b2 = vaddq_u32(sum2.val[0], sum2.val[1]);
  900. uint32x4_t b3 = vaddq_u32(sum3.val[0], sum3.val[1]);
  901. uint32x4_t a0 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b2, b3), vdupq_n_u32(4)), 3);
  902. uint32x4_t a1 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b0, b1), vdupq_n_u32(4)), 3);
  903. uint32x4_t a2 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b1, b3), vdupq_n_u32(4)), 3);
  904. uint32x4_t a3 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b0, b2), vdupq_n_u32(4)), 3);
  905. uint16x8_t o0 = vcombine_u16(vqmovun_s32(vreinterpretq_s32_u32( a0 )), vqmovun_s32(vreinterpretq_s32_u32( a1 )));
  906. uint16x8_t o1 = vcombine_u16(vqmovun_s32(vreinterpretq_s32_u32( a2 )), vqmovun_s32(vreinterpretq_s32_u32( a3 )));
  907. a[0] = v4i{o0[2], o0[1], o0[0], 0};
  908. a[1] = v4i{o0[6], o0[5], o0[4], 0};
  909. a[2] = v4i{o1[2], o1[1], o1[0], 0};
  910. a[3] = v4i{o1[6], o1[5], o1[4], 0};
  911. #else
  912. uint32_t r[4];
  913. uint32_t g[4];
  914. uint32_t b[4];
  915. memset(r, 0, sizeof(r));
  916. memset(g, 0, sizeof(g));
  917. memset(b, 0, sizeof(b));
  918. for( int j=0; j<4; j++ )
  919. {
  920. for( int i=0; i<4; i++ )
  921. {
  922. int index = (j & 2) + (i >> 1);
  923. b[index] += *data++;
  924. g[index] += *data++;
  925. r[index] += *data++;
  926. data++;
  927. }
  928. }
  929. a[0] = v4i{ uint16_t( (r[2] + r[3] + 4) / 8 ), uint16_t( (g[2] + g[3] + 4) / 8 ), uint16_t( (b[2] + b[3] + 4) / 8 ), 0};
  930. a[1] = v4i{ uint16_t( (r[0] + r[1] + 4) / 8 ), uint16_t( (g[0] + g[1] + 4) / 8 ), uint16_t( (b[0] + b[1] + 4) / 8 ), 0};
  931. a[2] = v4i{ uint16_t( (r[1] + r[3] + 4) / 8 ), uint16_t( (g[1] + g[3] + 4) / 8 ), uint16_t( (b[1] + b[3] + 4) / 8 ), 0};
  932. a[3] = v4i{ uint16_t( (r[0] + r[2] + 4) / 8 ), uint16_t( (g[0] + g[2] + 4) / 8 ), uint16_t( (b[0] + b[2] + 4) / 8 ), 0};
  933. #endif
  934. }
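// CalcErrorBlock: accumulates the raw per-channel sums (B, G, R, 0) over the same four
// half-block layouts as Average; CalcError below turns these sums into a comparable error
// value for a candidate average color.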
  935. static etcpak_force_inline void CalcErrorBlock( const uint8_t* data, unsigned int err[4][4] )
  936. {
  937. #ifdef __SSE4_1__
  938. __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
  939. __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
  940. __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
  941. __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
  942. __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
  943. __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
  944. __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
  945. __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));
  946. __m128i d0l = _mm_unpacklo_epi8(dm0, _mm_setzero_si128());
  947. __m128i d0h = _mm_unpackhi_epi8(dm0, _mm_setzero_si128());
  948. __m128i d1l = _mm_unpacklo_epi8(dm1, _mm_setzero_si128());
  949. __m128i d1h = _mm_unpackhi_epi8(dm1, _mm_setzero_si128());
  950. __m128i d2l = _mm_unpacklo_epi8(dm2, _mm_setzero_si128());
  951. __m128i d2h = _mm_unpackhi_epi8(dm2, _mm_setzero_si128());
  952. __m128i d3l = _mm_unpacklo_epi8(dm3, _mm_setzero_si128());
  953. __m128i d3h = _mm_unpackhi_epi8(dm3, _mm_setzero_si128());
  954. __m128i sum0 = _mm_add_epi16(d0l, d1l);
  955. __m128i sum1 = _mm_add_epi16(d0h, d1h);
  956. __m128i sum2 = _mm_add_epi16(d2l, d3l);
  957. __m128i sum3 = _mm_add_epi16(d2h, d3h);
  958. __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
  959. __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
  960. __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
  961. __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
  962. __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
  963. __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
  964. __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
  965. __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());
  966. __m128i b0 = _mm_add_epi32(sum0l, sum0h);
  967. __m128i b1 = _mm_add_epi32(sum1l, sum1h);
  968. __m128i b2 = _mm_add_epi32(sum2l, sum2h);
  969. __m128i b3 = _mm_add_epi32(sum3l, sum3h);
  970. __m128i a0 = _mm_add_epi32(b2, b3);
  971. __m128i a1 = _mm_add_epi32(b0, b1);
  972. __m128i a2 = _mm_add_epi32(b1, b3);
  973. __m128i a3 = _mm_add_epi32(b0, b2);
  974. _mm_storeu_si128((__m128i*)&err[0], a0);
  975. _mm_storeu_si128((__m128i*)&err[1], a1);
  976. _mm_storeu_si128((__m128i*)&err[2], a2);
  977. _mm_storeu_si128((__m128i*)&err[3], a3);
  978. #elif defined __ARM_NEON
  979. uint8x16x2_t t0 = vzipq_u8(vld1q_u8(data + 0), uint8x16_t());
  980. uint8x16x2_t t1 = vzipq_u8(vld1q_u8(data + 16), uint8x16_t());
  981. uint8x16x2_t t2 = vzipq_u8(vld1q_u8(data + 32), uint8x16_t());
  982. uint8x16x2_t t3 = vzipq_u8(vld1q_u8(data + 48), uint8x16_t());
  983. uint16x8x2_t d0 = { vreinterpretq_u16_u8(t0.val[0]), vreinterpretq_u16_u8(t0.val[1]) };
  984. uint16x8x2_t d1 = { vreinterpretq_u16_u8(t1.val[0]), vreinterpretq_u16_u8(t1.val[1]) };
  985. uint16x8x2_t d2 = { vreinterpretq_u16_u8(t2.val[0]), vreinterpretq_u16_u8(t2.val[1]) };
  986. uint16x8x2_t d3 = { vreinterpretq_u16_u8(t3.val[0]), vreinterpretq_u16_u8(t3.val[1]) };
  987. uint16x8x2_t s0 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[0] ), vreinterpretq_s16_u16( d1.val[0] ))), uint16x8_t());
  988. uint16x8x2_t s1 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[1] ), vreinterpretq_s16_u16( d1.val[1] ))), uint16x8_t());
  989. uint16x8x2_t s2 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[0] ), vreinterpretq_s16_u16( d3.val[0] ))), uint16x8_t());
  990. uint16x8x2_t s3 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[1] ), vreinterpretq_s16_u16( d3.val[1] ))), uint16x8_t());
  991. uint32x4x2_t sum0 = { vreinterpretq_u32_u16(s0.val[0]), vreinterpretq_u32_u16(s0.val[1]) };
  992. uint32x4x2_t sum1 = { vreinterpretq_u32_u16(s1.val[0]), vreinterpretq_u32_u16(s1.val[1]) };
  993. uint32x4x2_t sum2 = { vreinterpretq_u32_u16(s2.val[0]), vreinterpretq_u32_u16(s2.val[1]) };
  994. uint32x4x2_t sum3 = { vreinterpretq_u32_u16(s3.val[0]), vreinterpretq_u32_u16(s3.val[1]) };
  995. uint32x4_t b0 = vaddq_u32(sum0.val[0], sum0.val[1]);
  996. uint32x4_t b1 = vaddq_u32(sum1.val[0], sum1.val[1]);
  997. uint32x4_t b2 = vaddq_u32(sum2.val[0], sum2.val[1]);
  998. uint32x4_t b3 = vaddq_u32(sum3.val[0], sum3.val[1]);
  999. uint32x4_t a0 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b2, b3) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
  1000. uint32x4_t a1 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b0, b1) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
  1001. uint32x4_t a2 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b1, b3) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
  1002. uint32x4_t a3 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b0, b2) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
  1003. vst1q_u32(err[0], a0);
  1004. vst1q_u32(err[1], a1);
  1005. vst1q_u32(err[2], a2);
  1006. vst1q_u32(err[3], a3);
  1007. #else
  1008. unsigned int terr[4][4];
  1009. memset(terr, 0, 16 * sizeof(unsigned int));
  1010. for( int j=0; j<4; j++ )
  1011. {
  1012. for( int i=0; i<4; i++ )
  1013. {
  1014. int index = (j & 2) + (i >> 1);
  1015. unsigned int d = *data++;
  1016. terr[index][0] += d;
  1017. d = *data++;
  1018. terr[index][1] += d;
  1019. d = *data++;
  1020. terr[index][2] += d;
  1021. data++;
  1022. }
  1023. }
  1024. for( int i=0; i<3; i++ )
  1025. {
  1026. err[0][i] = terr[2][i] + terr[3][i];
  1027. err[1][i] = terr[0][i] + terr[1][i];
  1028. err[2][i] = terr[1][i] + terr[3][i];
  1029. err[3][i] = terr[0][i] + terr[2][i];
  1030. }
  1031. for( int i=0; i<4; i++ )
  1032. {
  1033. err[i][3] = 0;
  1034. }
  1035. #endif
  1036. }
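// CalcError evaluates, up to a per-block constant, the squared error of an 8-pixel half-block
// against a candidate average: per channel, sum((p - avg)^2) = sum(p^2) - 2*avg*sum(p) + 8*avg^2.
// sum(p^2) is identical for every candidate, so only the last two terms are computed, biased by
// a large constant so the running value never goes negative.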
  1037. static etcpak_force_inline unsigned int CalcError( const unsigned int block[4], const v4i& average )
  1038. {
  1039. unsigned int err = 0x3FFFFFFF; // Big value to prevent negative values, but small enough to prevent overflow
  1040. err -= block[0] * 2 * average[2];
  1041. err -= block[1] * 2 * average[1];
  1042. err -= block[2] * 2 * average[0];
  1043. err += 8 * ( sq( average[0] ) + sq( average[1] ) + sq( average[2] ) );
  1044. return err;
  1045. }
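// ProcessAverages: quantizes the half-block averages. Entries 4..7 receive the RGB555
// differential-mode pair, with one color of each pair constrained so its 5-bit difference from
// the other fits the encodable [-4, 3] range; entries 0..3 are requantized to RGB444 for
// individual mode. Both sets are expanded back to 8 bits by bit replication.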
  1046. static etcpak_force_inline void ProcessAverages( v4i* a )
  1047. {
  1048. #ifdef __SSE4_1__
  1049. for( int i=0; i<2; i++ )
  1050. {
  1051. __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());
  1052. __m128i t = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(31)), _mm_set1_epi16(128));
  1053. __m128i c = _mm_srli_epi16(_mm_add_epi16(t, _mm_srli_epi16(t, 8)), 8);
  1054. __m128i c1 = _mm_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
  1055. __m128i diff = _mm_sub_epi16(c, c1);
  1056. diff = _mm_max_epi16(diff, _mm_set1_epi16(-4));
  1057. diff = _mm_min_epi16(diff, _mm_set1_epi16(3));
  1058. __m128i co = _mm_add_epi16(c1, diff);
  1059. c = _mm_blend_epi16(co, c, 0xF0);
  1060. __m128i a0 = _mm_or_si128(_mm_slli_epi16(c, 3), _mm_srli_epi16(c, 2));
  1061. _mm_storeu_si128((__m128i*)a[4+i*2].data(), a0);
  1062. }
  1063. for( int i=0; i<2; i++ )
  1064. {
  1065. __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());
  1066. __m128i t0 = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(15)), _mm_set1_epi16(128));
  1067. __m128i t1 = _mm_srli_epi16(_mm_add_epi16(t0, _mm_srli_epi16(t0, 8)), 8);
  1068. __m128i t2 = _mm_or_si128(t1, _mm_slli_epi16(t1, 4));
  1069. _mm_storeu_si128((__m128i*)a[i*2].data(), t2);
  1070. }
  1071. #elif defined __ARM_NEON
  1072. for( int i=0; i<2; i++ )
  1073. {
  1074. int16x8_t d = vld1q_s16((int16_t*)&a[i*2]);
  1075. int16x8_t t = vaddq_s16(vmulq_s16(d, vdupq_n_s16(31)), vdupq_n_s16(128));
  1076. int16x8_t c = vshrq_n_s16(vaddq_s16(t, vshrq_n_s16(t, 8)), 8);
  1077. int16x8_t c1 = vcombine_s16(vget_high_s16(c), vget_high_s16(c));
  1078. int16x8_t diff = vsubq_s16(c, c1);
  1079. diff = vmaxq_s16(diff, vdupq_n_s16(-4));
  1080. diff = vminq_s16(diff, vdupq_n_s16(3));
  1081. int16x8_t co = vaddq_s16(c1, diff);
  1082. c = vcombine_s16(vget_low_s16(co), vget_high_s16(c));
  1083. int16x8_t a0 = vorrq_s16(vshlq_n_s16(c, 3), vshrq_n_s16(c, 2));
  1084. vst1q_s16((int16_t*)&a[4+i*2], a0);
  1085. }
  1086. for( int i=0; i<2; i++ )
  1087. {
  1088. int16x8_t d = vld1q_s16((int16_t*)&a[i*2]);
  1089. int16x8_t t0 = vaddq_s16(vmulq_s16(d, vdupq_n_s16(15)), vdupq_n_s16(128));
  1090. int16x8_t t1 = vshrq_n_s16(vaddq_s16(t0, vshrq_n_s16(t0, 8)), 8);
  1091. int16x8_t t2 = vorrq_s16(t1, vshlq_n_s16(t1, 4));
  1092. vst1q_s16((int16_t*)&a[i*2], t2);
  1093. }
  1094. #else
  1095. for( int i=0; i<2; i++ )
  1096. {
  1097. for( int j=0; j<3; j++ )
  1098. {
  1099. int32_t c1 = mul8bit( a[i*2+1][j], 31 );
  1100. int32_t c2 = mul8bit( a[i*2][j], 31 );
  1101. int32_t diff = c2 - c1;
  1102. if( diff > 3 ) diff = 3;
  1103. else if( diff < -4 ) diff = -4;
  1104. int32_t co = c1 + diff;
  1105. a[5+i*2][j] = ( c1 << 3 ) | ( c1 >> 2 );
  1106. a[4+i*2][j] = ( co << 3 ) | ( co >> 2 );
  1107. }
  1108. }
  1109. for( int i=0; i<4; i++ )
  1110. {
  1111. a[i][0] = g_avg2[mul8bit( a[i][0], 15 )];
  1112. a[i][1] = g_avg2[mul8bit( a[i][1], 15 )];
  1113. a[i][2] = g_avg2[mul8bit( a[i][2], 15 )];
  1114. }
  1115. #endif
  1116. }
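// EncodeAverages: packs the chosen base colors into the low 32 bits of the ETC1 word. Bit 0 of
// idx selects the flip bit and bit 1 the differential bit; individual mode stores two 4-bit
// values per channel, differential mode a 5-bit base plus a 3-bit two's-complement delta.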
  1117. static etcpak_force_inline void EncodeAverages( uint64_t& _d, const v4i* a, size_t idx )
  1118. {
  1119. auto d = _d;
  1120. d |= ( idx << 24 );
  1121. size_t base = idx << 1;
  1122. if( ( idx & 0x2 ) == 0 )
  1123. {
  1124. for( int i=0; i<3; i++ )
  1125. {
  1126. d |= uint64_t( a[base+0][i] >> 4 ) << ( i*8 );
  1127. d |= uint64_t( a[base+1][i] >> 4 ) << ( i*8 + 4 );
  1128. }
  1129. }
  1130. else
  1131. {
  1132. for( int i=0; i<3; i++ )
  1133. {
  1134. d |= uint64_t( a[base+1][i] & 0xF8 ) << ( i*8 );
  1135. int32_t c = ( ( a[base+0][i] & 0xF8 ) - ( a[base+1][i] & 0xF8 ) ) >> 3;
  1136. c &= ~0xFFFFFFF8;
  1137. d |= ((uint64_t)c) << ( i*8 );
  1138. }
  1139. }
  1140. _d = d;
  1141. }
  1142. static etcpak_force_inline uint64_t CheckSolid( const uint8_t* src )
  1143. {
  1144. #ifdef __SSE4_1__
  1145. __m128i d0 = _mm_loadu_si128(((__m128i*)src) + 0);
  1146. __m128i d1 = _mm_loadu_si128(((__m128i*)src) + 1);
  1147. __m128i d2 = _mm_loadu_si128(((__m128i*)src) + 2);
  1148. __m128i d3 = _mm_loadu_si128(((__m128i*)src) + 3);
  1149. __m128i c = _mm_shuffle_epi32(d0, _MM_SHUFFLE(0, 0, 0, 0));
  1150. __m128i c0 = _mm_cmpeq_epi8(d0, c);
  1151. __m128i c1 = _mm_cmpeq_epi8(d1, c);
  1152. __m128i c2 = _mm_cmpeq_epi8(d2, c);
  1153. __m128i c3 = _mm_cmpeq_epi8(d3, c);
  1154. __m128i m0 = _mm_and_si128(c0, c1);
  1155. __m128i m1 = _mm_and_si128(c2, c3);
  1156. __m128i m = _mm_and_si128(m0, m1);
  1157. if (!_mm_testc_si128(m, _mm_set1_epi32(-1)))
  1158. {
  1159. return 0;
  1160. }
  1161. #elif defined __ARM_NEON
  1162. int32x4_t d0 = vld1q_s32((int32_t*)src + 0);
  1163. int32x4_t d1 = vld1q_s32((int32_t*)src + 4);
  1164. int32x4_t d2 = vld1q_s32((int32_t*)src + 8);
  1165. int32x4_t d3 = vld1q_s32((int32_t*)src + 12);
  1166. int32x4_t c = vdupq_n_s32(d0[0]);
  1167. int32x4_t c0 = vreinterpretq_s32_u32(vceqq_s32(d0, c));
  1168. int32x4_t c1 = vreinterpretq_s32_u32(vceqq_s32(d1, c));
  1169. int32x4_t c2 = vreinterpretq_s32_u32(vceqq_s32(d2, c));
  1170. int32x4_t c3 = vreinterpretq_s32_u32(vceqq_s32(d3, c));
  1171. int32x4_t m0 = vandq_s32(c0, c1);
  1172. int32x4_t m1 = vandq_s32(c2, c3);
  1173. int64x2_t m = vreinterpretq_s64_s32(vandq_s32(m0, m1));
  1174. if (m[0] != -1 || m[1] != -1)
  1175. {
  1176. return 0;
  1177. }
  1178. #else
  1179. const uint8_t* ptr = src + 4;
  1180. for( int i=1; i<16; i++ )
  1181. {
  1182. if( memcmp( src, ptr, 4 ) != 0 )
  1183. {
  1184. return 0;
  1185. }
  1186. ptr += 4;
  1187. }
  1188. #endif
  1189. return 0x02000000 |
  1190. ( (unsigned int)( src[0] & 0xF8 ) << 16 ) |
  1191. ( (unsigned int)( src[1] & 0xF8 ) << 8 ) |
  1192. ( (unsigned int)( src[2] & 0xF8 ) );
  1193. }
  1194. static etcpak_force_inline void PrepareAverages( v4i a[8], const uint8_t* src, unsigned int err[4] )
  1195. {
  1196. Average( src, a );
  1197. ProcessAverages( a );
  1198. unsigned int errblock[4][4];
  1199. CalcErrorBlock( src, errblock );
  1200. for( int i=0; i<4; i++ )
  1201. {
  1202. err[i/2] += CalcError( errblock[i], a[i] );
  1203. err[2+i/2] += CalcError( errblock[i], a[i+4] );
  1204. }
  1205. }
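// FindBestFit (reference path): projects each pixel's difference from its half-block average
// onto luma (77*dR + 151*dG + 28*dB), then for each of the eight ETC1 modifier tables picks the
// closest entry, accumulating the squared error per table in terr[] and the per-pixel selector
// in tsel[].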
  1206. static etcpak_force_inline void FindBestFit( uint64_t terr[2][8], uint16_t tsel[16][8], v4i a[8], const uint32_t* id, const uint8_t* data )
  1207. {
  1208. for( size_t i=0; i<16; i++ )
  1209. {
  1210. uint16_t* sel = tsel[i];
  1211. unsigned int bid = id[i];
  1212. uint64_t* ter = terr[bid%2];
  1213. uint8_t b = *data++;
  1214. uint8_t g = *data++;
  1215. uint8_t r = *data++;
  1216. data++;
  1217. int dr = a[bid][0] - r;
  1218. int dg = a[bid][1] - g;
  1219. int db = a[bid][2] - b;
  1220. #ifdef __SSE4_1__
  1221. // Reference implementation
  1222. __m128i pix = _mm_set1_epi32(dr * 77 + dg * 151 + db * 28);
  1223. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1224. __m128i error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[0]));
  1225. __m128i error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[1]));
  1226. __m128i error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[0]));
  1227. __m128i error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[1]));
  1228. __m128i index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
  1229. __m128i minError0 = _mm_min_epi32(error0, error1);
  1230. __m128i index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
  1231. __m128i minError1 = _mm_min_epi32(error2, error3);
  1232. __m128i minIndex0 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
  1233. __m128i minError = _mm_min_epi32(minError0, minError1);
  1234. // Squaring the minimum error to produce correct values when adding
  1235. __m128i minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
  1236. __m128i squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
  1237. squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
  1238. _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
  1239. __m128i minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
  1240. __m128i squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
  1241. squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
  1242. _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);
  1243. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1244. error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[2]));
  1245. error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[3]));
  1246. error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[2]));
  1247. error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[3]));
  1248. index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
  1249. minError0 = _mm_min_epi32(error0, error1);
  1250. index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
  1251. minError1 = _mm_min_epi32(error2, error3);
  1252. __m128i minIndex1 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
  1253. minError = _mm_min_epi32(minError0, minError1);
  1254. // Squaring the minimum error to produce correct values when adding
  1255. minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
  1256. squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
  1257. squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 2));
  1258. _mm_storeu_si128(((__m128i*)ter) + 2, squareErrorLow);
  1259. minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
  1260. squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
  1261. squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 3));
  1262. _mm_storeu_si128(((__m128i*)ter) + 3, squareErrorHigh);
  1263. __m128i minIndex = _mm_packs_epi32(minIndex0, minIndex1);
  1264. _mm_storeu_si128((__m128i*)sel, minIndex);
  1265. #elif defined __ARM_NEON
  1266. int32x4_t pix = vdupq_n_s32(dr * 77 + dg * 151 + db * 28);
  1267. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1268. uint32x4_t error0 = vreinterpretq_u32_s32(vabsq_s32(vaddq_s32(pix, g_table256_NEON[0])));
  1269. uint32x4_t error1 = vreinterpretq_u32_s32(vabsq_s32(vaddq_s32(pix, g_table256_NEON[1])));
  1270. uint32x4_t error2 = vreinterpretq_u32_s32(vabsq_s32(vsubq_s32(pix, g_table256_NEON[0])));
  1271. uint32x4_t error3 = vreinterpretq_u32_s32(vabsq_s32(vsubq_s32(pix, g_table256_NEON[1])));
  1272. uint32x4_t index0 = vandq_u32(vcltq_u32(error1, error0), vdupq_n_u32(1));
  1273. uint32x4_t minError0 = vminq_u32(error0, error1);
  1274. uint32x4_t index1 = vreinterpretq_u32_s32(vsubq_s32(vdupq_n_s32(2), vreinterpretq_s32_u32(vcltq_u32(error3, error2))));
  1275. uint32x4_t minError1 = vminq_u32(error2, error3);
  1276. uint32x4_t blendMask = vcltq_u32(minError1, minError0);
  1277. uint32x4_t minIndex0 = vorrq_u32(vbicq_u32(index0, blendMask), vandq_u32(index1, blendMask));
  1278. uint32x4_t minError = vminq_u32(minError0, minError1);
  1279. // Squaring the minimum error to produce correct values when adding
  1280. uint32x4_t squareErrorLow = vmulq_u32(minError, minError);
  1281. uint32x4_t squareErrorHigh = vshrq_n_u32(vreinterpretq_u32_s32(vqdmulhq_s32(vreinterpretq_s32_u32(minError), vreinterpretq_s32_u32(minError))), 1);
  1282. uint32x4x2_t squareErrorZip = vzipq_u32(squareErrorLow, squareErrorHigh);
  1283. uint64x2x2_t squareError = { vreinterpretq_u64_u32(squareErrorZip.val[0]), vreinterpretq_u64_u32(squareErrorZip.val[1]) };
  1284. squareError.val[0] = vaddq_u64(squareError.val[0], vld1q_u64(ter + 0));
  1285. squareError.val[1] = vaddq_u64(squareError.val[1], vld1q_u64(ter + 2));
  1286. vst1q_u64(ter + 0, squareError.val[0]);
  1287. vst1q_u64(ter + 2, squareError.val[1]);
  1288. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1289. error0 = vreinterpretq_u32_s32( vabsq_s32(vaddq_s32(pix, g_table256_NEON[2])));
  1290. error1 = vreinterpretq_u32_s32( vabsq_s32(vaddq_s32(pix, g_table256_NEON[3])));
  1291. error2 = vreinterpretq_u32_s32( vabsq_s32(vsubq_s32(pix, g_table256_NEON[2])));
  1292. error3 = vreinterpretq_u32_s32( vabsq_s32(vsubq_s32(pix, g_table256_NEON[3])));
  1293. index0 = vandq_u32(vcltq_u32(error1, error0), vdupq_n_u32(1));
  1294. minError0 = vminq_u32(error0, error1);
  1295. index1 = vreinterpretq_u32_s32( vsubq_s32(vdupq_n_s32(2), vreinterpretq_s32_u32(vcltq_u32(error3, error2))) );
  1296. minError1 = vminq_u32(error2, error3);
  1297. blendMask = vcltq_u32(minError1, minError0);
  1298. uint32x4_t minIndex1 = vorrq_u32(vbicq_u32(index0, blendMask), vandq_u32(index1, blendMask));
  1299. minError = vminq_u32(minError0, minError1);
  1300. // Squaring the minimum error to produce correct values when adding
  1301. squareErrorLow = vmulq_u32(minError, minError);
  1302. squareErrorHigh = vshrq_n_u32(vreinterpretq_u32_s32( vqdmulhq_s32(vreinterpretq_s32_u32(minError), vreinterpretq_s32_u32(minError)) ), 1 );
  1303. squareErrorZip = vzipq_u32(squareErrorLow, squareErrorHigh);
  1304. squareError.val[0] = vaddq_u64(vreinterpretq_u64_u32( squareErrorZip.val[0] ), vld1q_u64(ter + 4));
  1305. squareError.val[1] = vaddq_u64(vreinterpretq_u64_u32( squareErrorZip.val[1] ), vld1q_u64(ter + 6));
  1306. vst1q_u64(ter + 4, squareError.val[0]);
  1307. vst1q_u64(ter + 6, squareError.val[1]);
  1308. uint16x8_t minIndex = vcombine_u16(vqmovn_u32(minIndex0), vqmovn_u32(minIndex1));
  1309. vst1q_u16(sel, minIndex);
  1310. #else
  1311. int pix = dr * 77 + dg * 151 + db * 28;
  1312. for( int t=0; t<8; t++ )
  1313. {
  1314. const int64_t* tab = g_table256[t];
  1315. unsigned int idx = 0;
  1316. uint64_t err = sq( tab[0] + pix );
  1317. for( int j=1; j<4; j++ )
  1318. {
  1319. uint64_t local = sq( tab[j] + pix );
  1320. if( local < err )
  1321. {
  1322. err = local;
  1323. idx = j;
  1324. }
  1325. }
  1326. *sel++ = idx;
  1327. *ter++ += err;
  1328. }
  1329. #endif
  1330. }
  1331. }
  1332. #if defined __SSE4_1__ || defined __ARM_NEON
  1333. // Non-reference implementation, but faster. Produces the same results as the AVX2 version
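// It works on halved luma weights (38/76/14) so the products fit in int16, and exploits the
// symmetry of the modifier tables: only half of each table is compared and the sign bit of the
// projected difference supplies the high bit of the selector.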
  1334. static etcpak_force_inline void FindBestFit( uint32_t terr[2][8], uint16_t tsel[16][8], v4i a[8], const uint32_t* id, const uint8_t* data )
  1335. {
  1336. for( size_t i=0; i<16; i++ )
  1337. {
  1338. uint16_t* sel = tsel[i];
  1339. unsigned int bid = id[i];
  1340. uint32_t* ter = terr[bid%2];
  1341. uint8_t b = *data++;
  1342. uint8_t g = *data++;
  1343. uint8_t r = *data++;
  1344. data++;
  1345. int dr = a[bid][0] - r;
  1346. int dg = a[bid][1] - g;
  1347. int db = a[bid][2] - b;
  1348. #ifdef __SSE4_1__
  1349. // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
  1350. // This produces slightly different results, but is significantly faster
  1351. __m128i pixel = _mm_set1_epi16(dr * 38 + dg * 76 + db * 14);
  1352. __m128i pix = _mm_abs_epi16(pixel);
  1353. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1354. // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
  1355. __m128i error0 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[0]));
  1356. __m128i error1 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[1]));
  1357. __m128i index = _mm_and_si128(_mm_cmplt_epi16(error1, error0), _mm_set1_epi16(1));
  1358. __m128i minError = _mm_min_epi16(error0, error1);
  1359. // Exploit the symmetry of the selector table and use the sign bit
  1360. // This produces slightly different results, but is needed to match the AVX2 implementation
  1361. __m128i indexBit = _mm_andnot_si128(_mm_srli_epi16(pixel, 15), _mm_set1_epi8(-1));
  1362. __m128i minIndex = _mm_or_si128(index, _mm_add_epi16(indexBit, indexBit));
  1363. // Squaring the minimum error to produce correct values when adding
  1364. __m128i squareErrorLo = _mm_mullo_epi16(minError, minError);
  1365. __m128i squareErrorHi = _mm_mulhi_epi16(minError, minError);
  1366. __m128i squareErrorLow = _mm_unpacklo_epi16(squareErrorLo, squareErrorHi);
  1367. __m128i squareErrorHigh = _mm_unpackhi_epi16(squareErrorLo, squareErrorHi);
  1368. squareErrorLow = _mm_add_epi32(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
  1369. _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
  1370. squareErrorHigh = _mm_add_epi32(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
  1371. _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);
  1372. _mm_storeu_si128((__m128i*)sel, minIndex);
  1373. #elif defined __ARM_NEON
  1374. int16x8_t pixel = vdupq_n_s16( dr * 38 + dg * 76 + db * 14 );
  1375. int16x8_t pix = vabsq_s16( pixel );
  1376. int16x8_t error0 = vabsq_s16( vsubq_s16( pix, g_table128_NEON[0] ) );
  1377. int16x8_t error1 = vabsq_s16( vsubq_s16( pix, g_table128_NEON[1] ) );
  1378. int16x8_t index = vandq_s16( vreinterpretq_s16_u16( vcltq_s16( error1, error0 ) ), vdupq_n_s16( 1 ) );
  1379. int16x8_t minError = vminq_s16( error0, error1 );
  1380. int16x8_t indexBit = vandq_s16( vmvnq_s16( vshrq_n_s16( pixel, 15 ) ), vdupq_n_s16( -1 ) );
  1381. int16x8_t minIndex = vorrq_s16( index, vaddq_s16( indexBit, indexBit ) );
  1382. int16x4_t minErrorLow = vget_low_s16( minError );
  1383. int16x4_t minErrorHigh = vget_high_s16( minError );
  1384. int32x4_t squareErrorLow = vmull_s16( minErrorLow, minErrorLow );
  1385. int32x4_t squareErrorHigh = vmull_s16( minErrorHigh, minErrorHigh );
  1386. int32x4_t squareErrorSumLow = vaddq_s32( squareErrorLow, vld1q_s32( (int32_t*)ter ) );
  1387. int32x4_t squareErrorSumHigh = vaddq_s32( squareErrorHigh, vld1q_s32( (int32_t*)ter + 4 ) );
  1388. vst1q_s32( (int32_t*)ter, squareErrorSumLow );
  1389. vst1q_s32( (int32_t*)ter + 4, squareErrorSumHigh );
  1390. vst1q_s16( (int16_t*)sel, minIndex );
  1391. #endif
  1392. }
  1393. }
  1394. #endif
  1395. static etcpak_force_inline uint8_t convert6(float f)
  1396. {
  1397. int i = (std::min(std::max(static_cast<int>(f), 0), 1023) - 15) >> 1;
  1398. return (i + 11 - ((i + 11) >> 7) - ((i + 4) >> 7)) >> 3;
  1399. }
  1400. static etcpak_force_inline uint8_t convert7(float f)
  1401. {
  1402. int i = (std::min(std::max(static_cast<int>(f), 0), 1023) - 15) >> 1;
  1403. return (i + 9 - ((i + 9) >> 8) - ((i + 6) >> 8)) >> 2;
  1404. }
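// Planar: fits the ETC2 planar mode. The code below is effectively a closed-form least-squares
// fit of a color ramp defined by three corner colors O, H and V over the 4x4 block; the corners
// are then quantized to R6G7B6 via convert6/convert7, and the luma-weighted squared
// reconstruction error is also returned for mode selection when heuristics are in use.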
  1405. static etcpak_force_inline std::pair<uint64_t, uint64_t> Planar( const uint8_t* src, const uint8_t mode, bool useHeuristics )
  1406. {
  1407. int32_t r = 0;
  1408. int32_t g = 0;
  1409. int32_t b = 0;
  1410. for( int i = 0; i < 16; ++i )
  1411. {
  1412. b += src[i * 4 + 0];
  1413. g += src[i * 4 + 1];
  1414. r += src[i * 4 + 2];
  1415. }
  1416. int32_t difRyz = 0;
  1417. int32_t difGyz = 0;
  1418. int32_t difByz = 0;
  1419. int32_t difRxz = 0;
  1420. int32_t difGxz = 0;
  1421. int32_t difBxz = 0;
  1422. const int32_t scaling[] = { -255, -85, 85, 255 };
  1423. for (int i = 0; i < 16; ++i)
  1424. {
  1425. int32_t difB = (static_cast<int>(src[i * 4 + 0]) << 4) - b;
  1426. int32_t difG = (static_cast<int>(src[i * 4 + 1]) << 4) - g;
  1427. int32_t difR = (static_cast<int>(src[i * 4 + 2]) << 4) - r;
  1428. difRyz += difR * scaling[i % 4];
  1429. difGyz += difG * scaling[i % 4];
  1430. difByz += difB * scaling[i % 4];
  1431. difRxz += difR * scaling[i / 4];
  1432. difGxz += difG * scaling[i / 4];
  1433. difBxz += difB * scaling[i / 4];
  1434. }
  1435. const float scale = -4.0f / ((255 * 255 * 8.0f + 85 * 85 * 8.0f) * 16.0f);
  1436. float aR = difRxz * scale;
  1437. float aG = difGxz * scale;
  1438. float aB = difBxz * scale;
  1439. float bR = difRyz * scale;
  1440. float bG = difGyz * scale;
  1441. float bB = difByz * scale;
  1442. float dR = r * (4.0f / 16.0f);
  1443. float dG = g * (4.0f / 16.0f);
  1444. float dB = b * (4.0f / 16.0f);
  1445. // calculating the three colors RGBO, RGBH, and RGBV. RGB = df - af * x - bf * y;
  1446. float cofR = std::fma(aR, 255.0f, std::fma(bR, 255.0f, dR));
  1447. float cofG = std::fma(aG, 255.0f, std::fma(bG, 255.0f, dG));
  1448. float cofB = std::fma(aB, 255.0f, std::fma(bB, 255.0f, dB));
  1449. float chfR = std::fma(aR, -425.0f, std::fma(bR, 255.0f, dR));
  1450. float chfG = std::fma(aG, -425.0f, std::fma(bG, 255.0f, dG));
  1451. float chfB = std::fma(aB, -425.0f, std::fma(bB, 255.0f, dB));
  1452. float cvfR = std::fma(aR, 255.0f, std::fma(bR, -425.0f, dR));
  1453. float cvfG = std::fma(aG, 255.0f, std::fma(bG, -425.0f, dG));
  1454. float cvfB = std::fma(aB, 255.0f, std::fma(bB, -425.0f, dB));
  1455. // convert to r6g7b6
  1456. int32_t coR = convert6(cofR);
  1457. int32_t coG = convert7(cofG);
  1458. int32_t coB = convert6(cofB);
  1459. int32_t chR = convert6(chfR);
  1460. int32_t chG = convert7(chfG);
  1461. int32_t chB = convert6(chfB);
  1462. int32_t cvR = convert6(cvfR);
  1463. int32_t cvG = convert7(cvfG);
  1464. int32_t cvB = convert6(cvfB);
  1465. // Error calculation
  1466. uint64_t error = 0;
  1467. if( ModePlanar != mode && useHeuristics )
  1468. {
  1469. auto ro0 = coR;
  1470. auto go0 = coG;
  1471. auto bo0 = coB;
  1472. auto ro1 = ( ro0 >> 4 ) | ( ro0 << 2 );
  1473. auto go1 = ( go0 >> 6 ) | ( go0 << 1 );
  1474. auto bo1 = ( bo0 >> 4 ) | ( bo0 << 2 );
  1475. auto ro2 = ( ro1 << 2 ) + 2;
  1476. auto go2 = ( go1 << 2 ) + 2;
  1477. auto bo2 = ( bo1 << 2 ) + 2;
  1478. auto rh0 = chR;
  1479. auto gh0 = chG;
  1480. auto bh0 = chB;
  1481. auto rh1 = ( rh0 >> 4 ) | ( rh0 << 2 );
  1482. auto gh1 = ( gh0 >> 6 ) | ( gh0 << 1 );
  1483. auto bh1 = ( bh0 >> 4 ) | ( bh0 << 2 );
  1484. auto rh2 = rh1 - ro1;
  1485. auto gh2 = gh1 - go1;
  1486. auto bh2 = bh1 - bo1;
  1487. auto rv0 = cvR;
  1488. auto gv0 = cvG;
  1489. auto bv0 = cvB;
  1490. auto rv1 = ( rv0 >> 4 ) | ( rv0 << 2 );
  1491. auto gv1 = ( gv0 >> 6 ) | ( gv0 << 1 );
  1492. auto bv1 = ( bv0 >> 4 ) | ( bv0 << 2 );
  1493. auto rv2 = rv1 - ro1;
  1494. auto gv2 = gv1 - go1;
  1495. auto bv2 = bv1 - bo1;
  1496. for( int i = 0; i < 16; ++i )
  1497. {
  1498. int32_t cR = clampu8( ( rh2 * ( i / 4 ) + rv2 * ( i % 4 ) + ro2 ) >> 2 );
  1499. int32_t cG = clampu8( ( gh2 * ( i / 4 ) + gv2 * ( i % 4 ) + go2 ) >> 2 );
  1500. int32_t cB = clampu8( ( bh2 * ( i / 4 ) + bv2 * ( i % 4 ) + bo2 ) >> 2 );
  1501. int32_t difB = static_cast<int>( src[i * 4 + 0] ) - cB;
  1502. int32_t difG = static_cast<int>( src[i * 4 + 1] ) - cG;
  1503. int32_t difR = static_cast<int>( src[i * 4 + 2] ) - cR;
  1504. int32_t dif = difR * 38 + difG * 76 + difB * 14;
  1505. error += dif * dif;
  1506. }
  1507. }
  1508. /**/
  1509. uint32_t rgbv = cvB | ( cvG << 6 ) | ( cvR << 13 );
  1510. uint32_t rgbh = chB | ( chG << 6 ) | ( chR << 13 );
  1511. uint32_t hi = rgbv | ( ( rgbh & 0x1FFF ) << 19 );
  1512. uint32_t lo = ( chR & 0x1 ) | 0x2 | ( ( chR << 1 ) & 0x7C );
  1513. lo |= ( ( coB & 0x07 ) << 7 ) | ( ( coB & 0x18 ) << 8 ) | ( ( coB & 0x20 ) << 11 );
  1514. lo |= ( ( coG & 0x3F ) << 17 ) | ( ( coG & 0x40 ) << 18 );
  1515. lo |= coR << 25;
  1516. const auto idx = ( coR & 0x20 ) | ( ( coG & 0x20 ) >> 1 ) | ( ( coB & 0x1E ) >> 1 );
  1517. lo |= g_flags[idx];
  1518. uint64_t result = static_cast<uint32_t>( _bswap( lo ) );
  1519. result |= static_cast<uint64_t>( static_cast<uint32_t>( _bswap( hi ) ) ) << 32;
  1520. return std::make_pair( result, error );
  1521. }
  1522. #ifdef __ARM_NEON
  1523. static etcpak_force_inline int32x2_t Planar_NEON_DifXZ( int16x8_t dif_lo, int16x8_t dif_hi )
  1524. {
  1525. int32x4_t dif0 = vmull_n_s16( vget_low_s16( dif_lo ), -255 );
  1526. int32x4_t dif1 = vmull_n_s16( vget_high_s16( dif_lo ), -85 );
  1527. int32x4_t dif2 = vmull_n_s16( vget_low_s16( dif_hi ), 85 );
  1528. int32x4_t dif3 = vmull_n_s16( vget_high_s16( dif_hi ), 255 );
  1529. int32x4_t dif4 = vaddq_s32( vaddq_s32( dif0, dif1 ), vaddq_s32( dif2, dif3 ) );
  1530. #ifndef __aarch64__
  1531. int32x2_t dif5 = vpadd_s32( vget_low_s32( dif4 ), vget_high_s32( dif4 ) );
  1532. return vpadd_s32( dif5, dif5 );
  1533. #else
  1534. return vdup_n_s32( vaddvq_s32( dif4 ) );
  1535. #endif
  1536. }
  1537. static etcpak_force_inline int32x2_t Planar_NEON_DifYZ( int16x8_t dif_lo, int16x8_t dif_hi )
  1538. {
  1539. int16x4_t scaling = { -255, -85, 85, 255 };
  1540. int32x4_t dif0 = vmull_s16( vget_low_s16( dif_lo ), scaling );
  1541. int32x4_t dif1 = vmull_s16( vget_high_s16( dif_lo ), scaling );
  1542. int32x4_t dif2 = vmull_s16( vget_low_s16( dif_hi ), scaling );
  1543. int32x4_t dif3 = vmull_s16( vget_high_s16( dif_hi ), scaling );
  1544. int32x4_t dif4 = vaddq_s32( vaddq_s32( dif0, dif1 ), vaddq_s32( dif2, dif3 ) );
  1545. #ifndef __aarch64__
  1546. int32x2_t dif5 = vpadd_s32( vget_low_s32( dif4 ), vget_high_s32( dif4 ) );
  1547. return vpadd_s32( dif5, dif5 );
  1548. #else
  1549. return vdup_n_s32( vaddvq_s32( dif4 ) );
  1550. #endif
  1551. }
  1552. static etcpak_force_inline int16x8_t Planar_NEON_SumWide( uint8x16_t src )
  1553. {
  1554. uint16x8_t accu8 = vpaddlq_u8( src );
  1555. #ifndef __aarch64__
  1556. uint16x4_t accu4 = vpadd_u16( vget_low_u16( accu8 ), vget_high_u16( accu8 ) );
  1557. uint16x4_t accu2 = vpadd_u16( accu4, accu4 );
  1558. uint16x4_t accu1 = vpadd_u16( accu2, accu2 );
  1559. return vreinterpretq_s16_u16( vcombine_u16( accu1, accu1 ) );
  1560. #else
  1561. return vdupq_n_s16( vaddvq_u16( accu8 ) );
  1562. #endif
  1563. }
  1564. static etcpak_force_inline int16x8_t convert6_NEON( int32x4_t lo, int32x4_t hi )
  1565. {
  1566. uint16x8_t x = vcombine_u16( vqmovun_s32( lo ), vqmovun_s32( hi ) );
  1567. int16x8_t i = vreinterpretq_s16_u16( vshrq_n_u16( vqshlq_n_u16( x, 6 ), 6) ); // clamp 0-1023
  1568. i = vhsubq_s16( i, vdupq_n_s16( 15 ) );
  1569. int16x8_t ip11 = vaddq_s16( i, vdupq_n_s16( 11 ) );
  1570. int16x8_t ip4 = vaddq_s16( i, vdupq_n_s16( 4 ) );
  1571. return vshrq_n_s16( vsubq_s16( vsubq_s16( ip11, vshrq_n_s16( ip11, 7 ) ), vshrq_n_s16( ip4, 7) ), 3 );
  1572. }
  1573. static etcpak_force_inline int16x4_t convert7_NEON( int32x4_t x )
  1574. {
  1575. int16x4_t i = vreinterpret_s16_u16( vshr_n_u16( vqshl_n_u16( vqmovun_s32( x ), 6 ), 6 ) ); // clamp 0-1023
  1576. i = vhsub_s16( i, vdup_n_s16( 15 ) );
  1577. int16x4_t p9 = vadd_s16( i, vdup_n_s16( 9 ) );
  1578. int16x4_t p6 = vadd_s16( i, vdup_n_s16( 6 ) );
  1579. return vshr_n_s16( vsub_s16( vsub_s16( p9, vshr_n_s16( p9, 8 ) ), vshr_n_s16( p6, 8 ) ), 2 );
  1580. }
  1581. static etcpak_force_inline std::pair<uint64_t, uint64_t> Planar_NEON( const uint8_t* src, const uint8_t mode, bool useHeuristics )
  1582. {
  1583. uint8x16x4_t srcBlock = vld4q_u8( src );
  1584. int16x8_t bSumWide = Planar_NEON_SumWide( srcBlock.val[0] );
  1585. int16x8_t gSumWide = Planar_NEON_SumWide( srcBlock.val[1] );
  1586. int16x8_t rSumWide = Planar_NEON_SumWide( srcBlock.val[2] );
  1587. int16x8_t dif_R_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[2] ), 4) ), rSumWide );
  1588. int16x8_t dif_R_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[2] ), 4) ), rSumWide );
  1589. int16x8_t dif_G_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[1] ), 4 ) ), gSumWide );
  1590. int16x8_t dif_G_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[1] ), 4 ) ), gSumWide );
  1591. int16x8_t dif_B_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[0] ), 4) ), bSumWide );
  1592. int16x8_t dif_B_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[0] ), 4) ), bSumWide );
  1593. int32x2x2_t dif_xz_z = vzip_s32( vzip_s32( Planar_NEON_DifXZ( dif_B_lo, dif_B_hi ), Planar_NEON_DifXZ( dif_R_lo, dif_R_hi ) ).val[0], Planar_NEON_DifXZ( dif_G_lo, dif_G_hi ) );
  1594. int32x4_t dif_xz = vcombine_s32( dif_xz_z.val[0], dif_xz_z.val[1] );
  1595. int32x2x2_t dif_yz_z = vzip_s32( vzip_s32( Planar_NEON_DifYZ( dif_B_lo, dif_B_hi ), Planar_NEON_DifYZ( dif_R_lo, dif_R_hi ) ).val[0], Planar_NEON_DifYZ( dif_G_lo, dif_G_hi ) );
  1596. int32x4_t dif_yz = vcombine_s32( dif_yz_z.val[0], dif_yz_z.val[1] );
  1597. const float fscale = -4.0f / ( (255 * 255 * 8.0f + 85 * 85 * 8.0f ) * 16.0f );
  1598. float32x4_t fa = vmulq_n_f32( vcvtq_f32_s32( dif_xz ), fscale );
  1599. float32x4_t fb = vmulq_n_f32( vcvtq_f32_s32( dif_yz ), fscale );
  1600. int16x4_t bgrgSum = vzip_s16( vzip_s16( vget_low_s16( bSumWide ), vget_low_s16( rSumWide ) ).val[0], vget_low_s16( gSumWide ) ).val[0];
  1601. float32x4_t fd = vmulq_n_f32( vcvtq_f32_s32( vmovl_s16( bgrgSum ) ), 4.0f / 16.0f);
  1602. float32x4_t cof = vmlaq_n_f32( vmlaq_n_f32( fd, fb, 255.0f ), fa, 255.0f );
  1603. float32x4_t chf = vmlaq_n_f32( vmlaq_n_f32( fd, fb, 255.0f ), fa, -425.0f );
  1604. float32x4_t cvf = vmlaq_n_f32( vmlaq_n_f32( fd, fb, -425.0f ), fa, 255.0f );
  1605. int32x4_t coi = vcvtq_s32_f32( cof );
  1606. int32x4_t chi = vcvtq_s32_f32( chf );
  1607. int32x4_t cvi = vcvtq_s32_f32( cvf );
  1608. int32x4x2_t tr_hv = vtrnq_s32( chi, cvi );
  1609. int32x4x2_t tr_o = vtrnq_s32( coi, coi );
  1610. int16x8_t c_hvoo_br_6 = convert6_NEON( tr_hv.val[0], tr_o.val[0] );
  1611. int16x4_t c_hvox_g_7 = convert7_NEON( vcombine_s32( vget_low_s32( tr_hv.val[1] ), vget_low_s32( tr_o.val[1] ) ) );
  1612. int16x8_t c_hvoo_br_8 = vorrq_s16( vshrq_n_s16( c_hvoo_br_6, 4 ), vshlq_n_s16( c_hvoo_br_6, 2 ) );
  1613. int16x4_t c_hvox_g_8 = vorr_s16( vshr_n_s16( c_hvox_g_7, 6 ), vshl_n_s16( c_hvox_g_7, 1 ) );
  1614. uint64_t error = 0;
  1615. if( mode != ModePlanar && useHeuristics )
  1616. {
  1617. int16x4_t rec_gxbr_o = vext_s16( c_hvox_g_8, vget_high_s16( c_hvoo_br_8 ), 3 );
  1618. rec_gxbr_o = vadd_s16( vshl_n_s16( rec_gxbr_o, 2 ), vdup_n_s16( 2 ) );
  1619. int16x8_t rec_ro_wide = vdupq_lane_s16( rec_gxbr_o, 3 );
  1620. int16x8_t rec_go_wide = vdupq_lane_s16( rec_gxbr_o, 0 );
  1621. int16x8_t rec_bo_wide = vdupq_lane_s16( rec_gxbr_o, 1 );
  1622. int16x4_t br_hv2 = vsub_s16( vget_low_s16( c_hvoo_br_8 ), vget_high_s16( c_hvoo_br_8 ) );
  1623. int16x4_t gg_hv2 = vsub_s16( c_hvox_g_8, vdup_lane_s16( c_hvox_g_8, 2 ) );
  1624. int16x8_t scaleh_lo = { 0, 0, 0, 0, 1, 1, 1, 1 };
  1625. int16x8_t scaleh_hi = { 2, 2, 2, 2, 3, 3, 3, 3 };
  1626. int16x8_t scalev = { 0, 1, 2, 3, 0, 1, 2, 3 };
  1627. int16x8_t rec_r_1 = vmlaq_lane_s16( rec_ro_wide, scalev, br_hv2, 3 );
  1628. int16x8_t rec_r_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_r_1, scaleh_lo, br_hv2, 2 ), 2 ) ) );
  1629. int16x8_t rec_r_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_r_1, scaleh_hi, br_hv2, 2 ), 2 ) ) );
  1630. int16x8_t rec_b_1 = vmlaq_lane_s16( rec_bo_wide, scalev, br_hv2, 1 );
  1631. int16x8_t rec_b_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_b_1, scaleh_lo, br_hv2, 0 ), 2 ) ) );
  1632. int16x8_t rec_b_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_b_1, scaleh_hi, br_hv2, 0 ), 2 ) ) );
  1633. int16x8_t rec_g_1 = vmlaq_lane_s16( rec_go_wide, scalev, gg_hv2, 1 );
  1634. int16x8_t rec_g_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_g_1, scaleh_lo, gg_hv2, 0 ), 2 ) ) );
  1635. int16x8_t rec_g_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_g_1, scaleh_hi, gg_hv2, 0 ), 2 ) ) );
  1636. int16x8_t dif_r_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[2] ) ) ), rec_r_lo );
  1637. int16x8_t dif_r_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[2] ) ) ), rec_r_hi );
  1638. int16x8_t dif_g_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[1] ) ) ), rec_g_lo );
  1639. int16x8_t dif_g_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[1] ) ) ), rec_g_hi );
  1640. int16x8_t dif_b_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[0] ) ) ), rec_b_lo );
  1641. int16x8_t dif_b_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[0] ) ) ), rec_b_hi );
  1642. int16x8_t dif_lo = vmlaq_n_s16( vmlaq_n_s16( vmulq_n_s16( dif_r_lo, 38 ), dif_g_lo, 76 ), dif_b_lo, 14 );
  1643. int16x8_t dif_hi = vmlaq_n_s16( vmlaq_n_s16( vmulq_n_s16( dif_r_hi, 38 ), dif_g_hi, 76 ), dif_b_hi, 14 );
  1644. int16x4_t tmpDif = vget_low_s16( dif_lo );
  1645. int32x4_t difsq_0 = vmull_s16( tmpDif, tmpDif );
  1646. tmpDif = vget_high_s16( dif_lo );
  1647. int32x4_t difsq_1 = vmull_s16( tmpDif, tmpDif );
  1648. tmpDif = vget_low_s16( dif_hi );
  1649. int32x4_t difsq_2 = vmull_s16( tmpDif, tmpDif );
  1650. tmpDif = vget_high_s16( dif_hi );
  1651. int32x4_t difsq_3 = vmull_s16( tmpDif, tmpDif );
  1652. uint32x4_t difsq_5 = vaddq_u32( vreinterpretq_u32_s32( difsq_0 ), vreinterpretq_u32_s32( difsq_1 ) );
  1653. uint32x4_t difsq_6 = vaddq_u32( vreinterpretq_u32_s32( difsq_2 ), vreinterpretq_u32_s32( difsq_3 ) );
  1654. uint64x2_t difsq_7 = vaddl_u32( vget_low_u32( difsq_5 ), vget_high_u32( difsq_5 ) );
  1655. uint64x2_t difsq_8 = vaddl_u32( vget_low_u32( difsq_6 ), vget_high_u32( difsq_6 ) );
  1656. uint64x2_t difsq_9 = vaddq_u64( difsq_7, difsq_8 );
  1657. #ifdef __aarch64__
  1658. error = vaddvq_u64( difsq_9 );
  1659. #else
  1660. error = vgetq_lane_u64( difsq_9, 0 ) + vgetq_lane_u64( difsq_9, 1 );
  1661. #endif
  1662. }
  1663. int32_t coR = c_hvoo_br_6[6];
  1664. int32_t coG = c_hvox_g_7[2];
  1665. int32_t coB = c_hvoo_br_6[4];
  1666. int32_t chR = c_hvoo_br_6[2];
  1667. int32_t chG = c_hvox_g_7[0];
  1668. int32_t chB = c_hvoo_br_6[0];
  1669. int32_t cvR = c_hvoo_br_6[3];
  1670. int32_t cvG = c_hvox_g_7[1];
  1671. int32_t cvB = c_hvoo_br_6[1];
  1672. uint32_t rgbv = cvB | ( cvG << 6 ) | ( cvR << 13 );
  1673. uint32_t rgbh = chB | ( chG << 6 ) | ( chR << 13 );
  1674. uint32_t hi = rgbv | ( ( rgbh & 0x1FFF ) << 19 );
  1675. uint32_t lo = ( chR & 0x1 ) | 0x2 | ( ( chR << 1 ) & 0x7C );
  1676. lo |= ( ( coB & 0x07 ) << 7 ) | ( ( coB & 0x18 ) << 8 ) | ( ( coB & 0x20 ) << 11 );
  1677. lo |= ( ( coG & 0x3F) << 17) | ( (coG & 0x40 ) << 18 );
  1678. lo |= coR << 25;
  1679. const auto idx = ( coR & 0x20 ) | ( ( coG & 0x20 ) >> 1 ) | ( ( coB & 0x1E ) >> 1 );
  1680. lo |= g_flags[idx];
  1681. uint64_t result = static_cast<uint32_t>( _bswap(lo) );
  1682. result |= static_cast<uint64_t>( static_cast<uint32_t>( _bswap( hi ) ) ) << 32;
  1683. return std::make_pair( result, error );
  1684. }
  1685. #endif
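// calculateErrorTH: searches the T-/H-mode distance table. For each candidate distance it
// builds the four paint colors, assigns every pixel to the closest one under the luma-weighted
// metric, and keeps the distance, pixel indices and total squared error of the best candidate;
// the loop stops early once the best distance found lags two steps behind the current one.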
  1686. #ifdef __AVX2__
  1687. uint32_t calculateErrorTH( bool tMode, uint8_t( colorsRGB444 )[2][3], uint8_t& dist, uint32_t& pixIndices, uint8_t startDist, __m128i r8, __m128i g8, __m128i b8 )
  1688. #else
  1689. uint32_t calculateErrorTH( bool tMode, uint8_t* src, uint8_t( colorsRGB444 )[2][3], uint8_t& dist, uint32_t& pixIndices, uint8_t startDist )
  1690. #endif
  1691. {
  1692. uint32_t blockErr = 0, bestBlockErr = MaxError;
  1693. uint32_t pixColors;
  1694. uint8_t possibleColors[4][3];
  1695. uint8_t colors[2][3];
  1696. decompressColor( colorsRGB444, colors );
  1697. #ifdef __AVX2__
  1698. __m128i reverseMask = _mm_set_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 );
  1699. #endif
  1700. // test distances
  1701. for( uint8_t d = startDist; d < 8; ++d )
  1702. {
  1703. if( d >= 2 && dist == d - 2 ) break;
  1704. blockErr = 0;
  1705. pixColors = 0;
  1706. if( tMode )
  1707. {
  1708. calculatePaintColors59T( d, colors, possibleColors );
  1709. }
  1710. else
  1711. {
  1712. calculatePaintColors58H( d, colors, possibleColors );
  1713. }
  1714. #ifdef __AVX2__
  1715. // RGB ordering
  1716. __m128i b8Rev = _mm_shuffle_epi8( b8, reverseMask );
  1717. __m128i g8Rev = _mm_shuffle_epi8( g8, reverseMask );
  1718. __m128i r8Rev = _mm_shuffle_epi8( r8, reverseMask );
  1719. // extends 3x128-bit RGB into 3x256-bit RGB for error comparisons
  1720. static const __m128i zero = _mm_setzero_si128();
  1721. __m128i b8Lo = _mm_unpacklo_epi8( b8Rev, zero );
  1722. __m128i g8Lo = _mm_unpacklo_epi8( g8Rev, zero );
  1723. __m128i r8Lo = _mm_unpacklo_epi8( r8Rev, zero );
  1724. __m128i b8Hi = _mm_unpackhi_epi8( b8Rev, zero );
  1725. __m128i g8Hi = _mm_unpackhi_epi8( g8Rev, zero );
  1726. __m128i r8Hi = _mm_unpackhi_epi8( r8Rev, zero );
  1727. __m256i b8 = _mm256_set_m128i( b8Hi, b8Lo );
  1728. __m256i g8 = _mm256_set_m128i( g8Hi, g8Lo );
  1729. __m256i r8 = _mm256_set_m128i( r8Hi, r8Lo );
  1730. // calculates differences between the pixel colors and the palette colors
  1731. __m256i diffb = _mm256_abs_epi16( _mm256_sub_epi16( b8, _mm256_set1_epi16( possibleColors[0][B] ) ) );
  1732. __m256i diffg = _mm256_abs_epi16( _mm256_sub_epi16( g8, _mm256_set1_epi16( possibleColors[0][G] ) ) );
  1733. __m256i diffr = _mm256_abs_epi16( _mm256_sub_epi16( r8, _mm256_set1_epi16( possibleColors[0][R] ) ) );
  1734. // luma-based error calculations
  1735. static const __m256i bWeight = _mm256_set1_epi16( 14 );
  1736. static const __m256i gWeight = _mm256_set1_epi16( 76 );
  1737. static const __m256i rWeight = _mm256_set1_epi16( 38 );
  1738. diffb = _mm256_mullo_epi16( diffb, bWeight );
  1739. diffg = _mm256_mullo_epi16( diffg, gWeight );
  1740. diffr = _mm256_mullo_epi16( diffr, rWeight );
  1741. // obtains the error with the current palette color
  1742. __m256i lowestPixErr = _mm256_add_epi16( _mm256_add_epi16( diffb, diffg ), diffr );
  1743. // error calculations with the remaining three palette colors
  1744. static const uint32_t masks[4] = { 0, 0x55555555, 0xAAAAAAAA, 0xFFFFFFFF };
  1745. for( uint8_t c = 1; c < 4; c++ )
  1746. {
  1747. __m256i diffb = _mm256_abs_epi16( _mm256_sub_epi16( b8, _mm256_set1_epi16( possibleColors[c][B] ) ) );
  1748. __m256i diffg = _mm256_abs_epi16( _mm256_sub_epi16( g8, _mm256_set1_epi16( possibleColors[c][G] ) ) );
  1749. __m256i diffr = _mm256_abs_epi16( _mm256_sub_epi16( r8, _mm256_set1_epi16( possibleColors[c][R] ) ) );
  1750. diffb = _mm256_mullo_epi16( diffb, bWeight );
  1751. diffg = _mm256_mullo_epi16( diffg, gWeight );
  1752. diffr = _mm256_mullo_epi16( diffr, rWeight );
  1753. // error comparison with the previous best color
  1754. __m256i pixErrors = _mm256_add_epi16( _mm256_add_epi16( diffb, diffg ), diffr );
  1755. __m256i minErr = _mm256_min_epu16( lowestPixErr, pixErrors );
  1756. __m256i cmpRes = _mm256_cmpeq_epi16( pixErrors, minErr );
  1757. lowestPixErr = minErr;
  1758. // update pixel colors
  1759. uint32_t updPixColors = _mm256_movemask_epi8( cmpRes );
  1760. uint32_t prevPixColors = pixColors & ~updPixColors;
  1761. uint32_t mskPixColors = masks[c] & updPixColors;
  1762. pixColors = prevPixColors | mskPixColors;
  1763. }
  1764. // accumulate the block error
  1765. alignas( 32 ) uint16_t pixErr16[16] = { 0, };
  1766. _mm256_storeu_si256( (__m256i*)pixErr16, lowestPixErr );
  1767. for( uint8_t p = 0; p < 16; p++ )
  1768. {
  1769. blockErr += (int)( pixErr16[p] ) * pixErr16[p];
  1770. }
  1771. #else
  1772. for( size_t y = 0; y < 4; ++y )
  1773. {
  1774. for( size_t x = 0; x < 4; ++x )
  1775. {
  1776. uint32_t bestPixErr = MaxError;
  1777. pixColors <<= 2; // Make room for next value
  1778. // Loop possible block colors
  1779. for( uint8_t c = 0; c < 4; ++c )
  1780. {
  1781. int diff[3];
  1782. diff[R] = src[4 * ( x * 4 + y ) + R] - possibleColors[c][R];
  1783. diff[G] = src[4 * ( x * 4 + y ) + G] - possibleColors[c][G];
  1784. diff[B] = src[4 * ( x * 4 + y ) + B] - possibleColors[c][B];
  1785. const uint32_t err = 38 * abs( diff[R] ) + 76 * abs( diff[G] ) + 14 * abs( diff[B] );
  1786. uint32_t pixErr = err * err;
  1787. // Choose best error
  1788. if( pixErr < bestPixErr )
  1789. {
  1790. bestPixErr = pixErr;
  1791. pixColors ^= ( pixColors & 3 ); // Reset the first two bits
  1792. pixColors |= c;
  1793. }
  1794. }
  1795. blockErr += bestPixErr;
  1796. }
  1797. }
  1798. #endif
  1799. if( blockErr < bestBlockErr )
  1800. {
  1801. bestBlockErr = blockErr;
  1802. dist = d;
  1803. pixIndices = pixColors;
  1804. }
  1805. }
  1806. return bestBlockErr;
  1807. }
  1808. // main T-/H-mode compression function
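// Sorts the pixels by luma, splits the sorted order at the point minimizing the combined luma
// range of the two groups, picks T- or H-mode from the relative range sizes, derives the two
// base colors (from endpoint pairs or group averages, depending on mode), estimates a starting
// distance from the average color spread, and finally calls calculateErrorTH to choose the best
// distance and selectors.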
  1809. #ifdef __AVX2__
  1810. uint32_t compressBlockTH( uint8_t* src, Luma& l, uint32_t& compressed1, uint32_t& compressed2, bool& tMode, __m128i r8, __m128i g8, __m128i b8 )
  1811. #else
  1812. uint32_t compressBlockTH( uint8_t *src, Luma& l, uint32_t& compressed1, uint32_t& compressed2, bool &tMode )
  1813. #endif
  1814. {
  1815. #ifdef __AVX2__
  1816. alignas( 8 ) uint8_t luma[16] = { 0, };
  1817. _mm_storeu_si128 ( (__m128i* )luma, l.luma8 );
  1818. #elif defined __ARM_NEON && defined __aarch64__
  1819. alignas( 8 ) uint8_t luma[16] = { 0 };
  1820. vst1q_u8( luma, l.luma8 );
  1821. #else
  1822. uint8_t* luma = l.val;
  1823. #endif
  1824. uint8_t pixIdx[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
  1825. // 1) sorts the pairs of (luma, pix_idx)
  1826. insertionSort( luma, pixIdx );
  1827. // 2) finds the min (left+right)
  1828. uint8_t minSumRangeIdx = 0;
  1829. uint16_t minSumRangeValue;
  1830. uint16_t sum;
  1831. static const uint8_t diffBonus[15] = {8, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 8};
  1832. const int16_t temp = luma[15] - luma[0];
  1833. minSumRangeValue = luma[15] - luma[1] + diffBonus[0];
  1834. for( uint8_t i = 1; i < 14; i++ )
  1835. {
  1836. sum = temp - luma[i+1] + luma[i] + diffBonus[i];
  1837. if( minSumRangeValue > sum )
  1838. {
  1839. minSumRangeValue = sum;
  1840. minSumRangeIdx = i;
  1841. }
  1842. }
  1843. sum = luma[14] - luma[0] + diffBonus[14];
  1844. if( minSumRangeValue > sum )
  1845. {
  1846. minSumRangeValue = sum;
  1847. minSumRangeIdx = 14;
  1848. }
  1849. uint8_t lRange, rRange;
  1850. lRange = luma[minSumRangeIdx] - luma[0];
  1851. rRange = luma[15] - luma[minSumRangeIdx + 1];
  1852. // 3) sets a proper mode
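// T-mode is selected when one partition's luma spread is at least twice the other's (the tighter cluster maps well to a single base color);
// otherwise H-mode is kept. 'swap' records that the low-luma partition is the wider one.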
  1853. bool swap = false;
  1854. if( lRange >= rRange )
  1855. {
  1856. if( lRange >= rRange * 2 )
  1857. {
  1858. swap = true;
  1859. tMode = true;
  1860. }
  1861. }
  1862. else
  1863. {
  1864. if( lRange * 2 <= rRange ) tMode = true;
  1865. }
  1866. // 4) calculates the two base colors
  1867. uint8_t rangeIdx[4] = { pixIdx[0], pixIdx[minSumRangeIdx], pixIdx[minSumRangeIdx + 1], pixIdx[15] };
  1868. uint16_t r[4], g[4], b[4];
  1869. for( uint8_t i = 0; i < 4; ++i )
  1870. {
  1871. uint8_t idx = rangeIdx[i] * 4;
  1872. b[i] = src[idx];
  1873. g[i] = src[idx + 1];
  1874. r[i] = src[idx + 2];
  1875. }
  1876. uint8_t mid_rgb[2][3];
  1877. if( swap )
  1878. {
  1879. mid_rgb[1][B] = ( b[0] + b[1] ) / 2;
  1880. mid_rgb[1][G] = ( g[0] + g[1] ) / 2;
  1881. mid_rgb[1][R] = ( r[0] + r[1] ) / 2;
  1882. uint16_t sum_rgb[3] = { 0, 0, 0 };
  1883. for( uint8_t i = minSumRangeIdx + 1; i < 16; i++ )
  1884. {
  1885. uint8_t idx = pixIdx[i] * 4;
  1886. sum_rgb[B] += src[idx];
  1887. sum_rgb[G] += src[idx + 1];
  1888. sum_rgb[R] += src[idx + 2];
  1889. }
  1890. const uint8_t temp = 15 - minSumRangeIdx;
  1891. mid_rgb[0][B] = sum_rgb[B] / temp;
  1892. mid_rgb[0][G] = sum_rgb[G] / temp;
  1893. mid_rgb[0][R] = sum_rgb[R] / temp;
  1894. }
  1895. else
  1896. {
  1897. mid_rgb[0][B] = (b[0] + b[1]) / 2;
  1898. mid_rgb[0][G] = (g[0] + g[1]) / 2;
  1899. mid_rgb[0][R] = (r[0] + r[1]) / 2;
  1900. if( tMode )
  1901. {
  1902. uint16_t sum_rgb[3] = { 0, 0, 0 };
  1903. for( uint8_t i = minSumRangeIdx + 1; i < 16; i++ )
  1904. {
  1905. uint8_t idx = pixIdx[i] * 4;
  1906. sum_rgb[B] += src[idx];
  1907. sum_rgb[G] += src[idx + 1];
  1908. sum_rgb[R] += src[idx + 2];
  1909. }
  1910. const uint8_t temp = 15 - minSumRangeIdx;
  1911. mid_rgb[1][B] = sum_rgb[B] / temp;
  1912. mid_rgb[1][G] = sum_rgb[G] / temp;
  1913. mid_rgb[1][R] = sum_rgb[R] / temp;
  1914. }
  1915. else
  1916. {
  1917. mid_rgb[1][B] = (b[2] + b[3]) / 2;
  1918. mid_rgb[1][G] = (g[2] + g[3]) / 2;
  1919. mid_rgb[1][R] = (r[2] + r[3]) / 2;
  1920. }
  1921. }
  1922. // 5) sets the start distance index
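// the average per-channel spread of the cluster endpoints is mapped to a starting index in the distance table, presumably so the search below begins near the most promising candidate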
  1923. uint32_t startDistCandidate;
  1924. uint32_t avgDist;
  1925. if( tMode )
  1926. {
  1927. if( swap )
  1928. {
  1929. avgDist = ( b[1] - b[0] + g[1] - g[0] + r[1] - r[0] ) / 6;
  1930. }
  1931. else
  1932. {
  1933. avgDist = ( b[3] - b[2] + g[3] - g[2] + r[3] - r[2] ) / 6;
  1934. }
  1935. }
  1936. else
  1937. {
  1938. avgDist = ( b[1] - b[0] + g[1] - g[0] + r[1] - r[0] + b[3] - b[2] + g[3] - g[2] + r[3] - r[2] ) / 12;
  1939. }
  1940. if( avgDist <= 16)
  1941. {
  1942. startDistCandidate = 0;
  1943. }
  1944. else if( avgDist <= 23 )
  1945. {
  1946. startDistCandidate = 1;
  1947. }
  1948. else if( avgDist <= 32 )
  1949. {
  1950. startDistCandidate = 2;
  1951. }
  1952. else if( avgDist <= 41 )
  1953. {
  1954. startDistCandidate = 3;
  1955. }
  1956. else
  1957. {
  1958. startDistCandidate = 4;
  1959. }
  1960. uint32_t bestErr = MaxError;
  1961. uint32_t bestPixIndices;
  1962. uint8_t bestDist = 10;
  1963. uint8_t colorsRGB444[2][3];
  1964. compressColor( mid_rgb, colorsRGB444, tMode );
  1965. compressed1 = 0;
  1966. // 6) finds the best candidate with the lowest error
  1967. #ifdef __AVX2__
  1968. // Vectorized ver
  1969. bestErr = calculateErrorTH( tMode, colorsRGB444, bestDist, bestPixIndices, startDistCandidate, r8, g8, b8 );
  1970. #else
  1971. // Scalar ver
  1972. bestErr = calculateErrorTH( tMode, src, colorsRGB444, bestDist, bestPixIndices, startDistCandidate );
  1973. #endif
  1974. // 7) outputs the final T or H block
  1975. if( tMode )
  1976. {
  1977. // Put the compress params into the compression block
  1978. compressed1 |= ( colorsRGB444[0][R] & 0xf ) << 23;
  1979. compressed1 |= ( colorsRGB444[0][G] & 0xf ) << 19;
  1980. compressed1 |= ( colorsRGB444[0][B] ) << 15;
  1981. compressed1 |= ( colorsRGB444[1][R] ) << 11;
  1982. compressed1 |= ( colorsRGB444[1][G] ) << 7;
  1983. compressed1 |= ( colorsRGB444[1][B] ) << 3;
  1984. compressed1 |= bestDist & 0x7;
  1985. }
  1986. else
  1987. {
  1988. int bestRGB444ColPacked[2];
  1989. bestRGB444ColPacked[0] = (colorsRGB444[0][R] << 8) + (colorsRGB444[0][G] << 4) + colorsRGB444[0][B];
  1990. bestRGB444ColPacked[1] = (colorsRGB444[1][R] << 8) + (colorsRGB444[1][G] << 4) + colorsRGB444[1][B];
  1991. if( ( bestRGB444ColPacked[0] >= bestRGB444ColPacked[1] ) ^ ( ( bestDist & 1 ) == 1 ) )
  1992. {
  1993. swapColors( colorsRGB444 );
1994. // Reshuffle pixel indices to exchange C1 with C3, and C2 with C4
  1995. bestPixIndices = ( 0x55555555 & bestPixIndices ) | ( 0xaaaaaaaa & ( ~bestPixIndices ) );
  1996. }
  1997. // Put the compress params into the compression block
  1998. compressed1 |= ( colorsRGB444[0][R] & 0xf ) << 22;
  1999. compressed1 |= ( colorsRGB444[0][G] & 0xf ) << 18;
  2000. compressed1 |= ( colorsRGB444[0][B] & 0xf ) << 14;
  2001. compressed1 |= ( colorsRGB444[1][R] & 0xf ) << 10;
  2002. compressed1 |= ( colorsRGB444[1][G] & 0xf ) << 6;
  2003. compressed1 |= ( colorsRGB444[1][B] & 0xf ) << 2;
  2004. compressed1 |= ( bestDist >> 1 ) & 0x3;
  2005. }
  2006. bestPixIndices = indexConversion( bestPixIndices );
  2007. compressed2 = 0;
  2008. compressed2 = ( compressed2 & ~( ( 0x2 << 31 ) - 1 ) ) | ( bestPixIndices & ( ( 2 << 31 ) - 1 ) );
  2009. return bestErr;
  2010. }
  2011. //#endif
  2012. template<class T, class S>
  2013. static etcpak_force_inline uint64_t EncodeSelectors( uint64_t d, const T terr[2][8], const S tsel[16][8], const uint32_t* id, const uint64_t value, const uint64_t error)
  2014. {
  2015. size_t tidx[2];
  2016. tidx[0] = GetLeastError( terr[0], 8 );
  2017. tidx[1] = GetLeastError( terr[1], 8 );
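// if the best ETC1 selector error cannot beat the error of the already-encoded alternative block (T/H or planar), return that block unchanged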
  2018. if ((terr[0][tidx[0]] + terr[1][tidx[1]]) >= error)
  2019. {
  2020. return value;
  2021. }
  2022. d |= tidx[0] << 26;
  2023. d |= tidx[1] << 29;
  2024. for( int i=0; i<16; i++ )
  2025. {
  2026. uint64_t t = tsel[i][tidx[id[i]%2]];
  2027. d |= ( t & 0x1 ) << ( i + 32 );
  2028. d |= ( t & 0x2 ) << ( i + 47 );
  2029. }
  2030. return FixByteOrder(d);
  2031. }
  2032. }
  2033. static etcpak_force_inline uint64_t ProcessRGB( const uint8_t* src )
  2034. {
  2035. #ifdef __AVX2__
  2036. uint64_t d = CheckSolid_AVX2( src );
  2037. if( d != 0 ) return d;
  2038. alignas(32) v4i a[8];
  2039. __m128i err0 = PrepareAverages_AVX2( a, src );
  2040. // Get index of minimum error (err0)
  2041. __m128i err1 = _mm_shuffle_epi32(err0, _MM_SHUFFLE(2, 3, 0, 1));
  2042. __m128i errMin0 = _mm_min_epu32(err0, err1);
  2043. __m128i errMin1 = _mm_shuffle_epi32(errMin0, _MM_SHUFFLE(1, 0, 3, 2));
  2044. __m128i errMin2 = _mm_min_epu32(errMin1, errMin0);
  2045. __m128i errMask = _mm_cmpeq_epi32(errMin2, err0);
  2046. uint32_t mask = _mm_movemask_epi8(errMask);
  2047. uint32_t idx = _bit_scan_forward(mask) >> 2;
  2048. d |= EncodeAverages_AVX2( a, idx );
  2049. alignas(32) uint32_t terr[2][8] = {};
  2050. alignas(32) uint32_t tsel[8];
  2051. if ((idx == 0) || (idx == 2))
  2052. {
  2053. FindBestFit_4x2_AVX2( terr, tsel, a, idx * 2, src );
  2054. }
  2055. else
  2056. {
  2057. FindBestFit_2x4_AVX2( terr, tsel, a, idx * 2, src );
  2058. }
  2059. return EncodeSelectors_AVX2( d, terr, tsel, (idx % 2) == 1 );
  2060. #else
  2061. uint64_t d = CheckSolid( src );
  2062. if( d != 0 ) return d;
  2063. v4i a[8];
  2064. unsigned int err[4] = {};
  2065. PrepareAverages( a, src, err );
  2066. size_t idx = GetLeastError( err, 4 );
  2067. EncodeAverages( d, a, idx );
  2068. #if ( defined __SSE4_1__ || defined __ARM_NEON ) && !defined REFERENCE_IMPLEMENTATION
  2069. uint32_t terr[2][8] = {};
  2070. #else
  2071. uint64_t terr[2][8] = {};
  2072. #endif
  2073. uint16_t tsel[16][8];
  2074. auto id = g_id[idx];
  2075. FindBestFit( terr, tsel, a, id, src );
  2076. return FixByteOrder( EncodeSelectors( d, terr, tsel, id ) );
  2077. #endif
  2078. }
  2079. #ifdef __AVX2__
  2080. // horizontal min/max functions. https://stackoverflow.com/questions/22256525/horizontal-minimum-and-maximum-using-sse
2081. // if a compilation error occurs with GCC, change the -march value in CFLAGS to one specific to your CPU (e.g., skylake).
  2082. static inline int16_t hMax( __m128i buffer, uint8_t& idx )
  2083. {
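// SSE4.1 only provides a horizontal minimum (_mm_minpos_epu16), so the maximum is found as 255 minus the minimum of (255 - x);
// bytes are first folded into 16-bit lanes because minpos works on 16-bit elements, and the byte index is recovered via compare + movemask + tzcnt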
  2084. __m128i tmp1 = _mm_sub_epi8( _mm_set1_epi8( (char)( 255 ) ), buffer );
  2085. __m128i tmp2 = _mm_min_epu8( tmp1, _mm_srli_epi16( tmp1, 8 ) );
  2086. __m128i tmp3 = _mm_minpos_epu16( tmp2 );
  2087. uint8_t result = 255 - (uint8_t)_mm_cvtsi128_si32( tmp3 );
  2088. __m128i mask = _mm_cmpeq_epi8( buffer, _mm_set1_epi8( result ) );
  2089. idx = _tzcnt_u32( _mm_movemask_epi8( mask ) );
  2090. return result;
  2091. }
  2092. #elif defined __ARM_NEON && defined __aarch64__
  2093. static inline int16_t hMax( uint8x16_t buffer, uint8_t& idx )
  2094. {
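// NEON has no movemask: each lane equal to the maximum contributes one unique bit (mask_lsb for bytes 0-7, mask_msb for bytes 8-15),
// the pairwise adds reduce those bits to a single bitmask, and ctz yields the index of the first matching byte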
  2095. const uint8_t max = vmaxvq_u8( buffer );
  2096. const uint16x8_t vmax = vdupq_n_u16( max );
  2097. uint8x16x2_t buff_wide = vzipq_u8( buffer, uint8x16_t() );
  2098. uint16x8_t lowbuf16 = vreinterpretq_u16_u8( buff_wide.val[0] );
  2099. uint16x8_t hibuf16 = vreinterpretq_u16_u8( buff_wide.val[1] );
  2100. uint16x8_t low_eqmask = vceqq_u16( lowbuf16, vmax );
  2101. uint16x8_t hi_eqmask = vceqq_u16( hibuf16, vmax );
  2102. static const uint16_t mask_lsb[] = {
  2103. 0x1, 0x2, 0x4, 0x8,
  2104. 0x10, 0x20, 0x40, 0x80 };
  2105. static const uint16_t mask_msb[] = {
  2106. 0x100, 0x200, 0x400, 0x800,
  2107. 0x1000, 0x2000, 0x4000, 0x8000 };
  2108. uint16x8_t vmask_lsb = vld1q_u16( mask_lsb );
  2109. uint16x8_t vmask_msb = vld1q_u16( mask_msb );
  2110. uint16x8_t pos_lsb = vandq_u16( vmask_lsb, low_eqmask );
  2111. uint16x8_t pos_msb = vandq_u16( vmask_msb, hi_eqmask );
  2112. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2113. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2114. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2115. uint64_t idx_lane1 = vgetq_lane_u64( vreinterpretq_u64_u16( pos_lsb ), 0 );
  2116. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2117. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2118. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2119. uint32_t idx_lane2 = vgetq_lane_u32( vreinterpretq_u32_u16( pos_msb ), 0 );
  2120. idx = idx_lane1 != 0 ? __builtin_ctz( idx_lane1 ) : __builtin_ctz( idx_lane2 );
  2121. return max;
  2122. }
  2123. #endif
  2124. #ifdef __AVX2__
  2125. static inline int16_t hMin( __m128i buffer, uint8_t& idx )
  2126. {
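// _mm_minpos_epu16 works on 16-bit lanes, so the byte minimum of each pair is folded into the low byte first;
// the byte index of the minimum is then recovered via compare + movemask + tzcnt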
  2127. __m128i tmp2 = _mm_min_epu8( buffer, _mm_srli_epi16( buffer, 8 ) );
  2128. __m128i tmp3 = _mm_minpos_epu16( tmp2 );
  2129. uint8_t result = (uint8_t)_mm_cvtsi128_si32( tmp3 );
  2130. __m128i mask = _mm_cmpeq_epi8( buffer, _mm_set1_epi8( result ) );
  2131. idx = _tzcnt_u32( _mm_movemask_epi8( mask ) );
  2132. return result;
  2133. }
  2134. #elif defined __ARM_NEON && defined __aarch64__
  2135. static inline int16_t hMin( uint8x16_t buffer, uint8_t& idx )
  2136. {
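// same movemask emulation as in hMax above, but searching for the minimum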
  2137. const uint8_t min = vminvq_u8( buffer );
  2138. const uint16x8_t vmin = vdupq_n_u16( min );
  2139. uint8x16x2_t buff_wide = vzipq_u8( buffer, uint8x16_t() );
  2140. uint16x8_t lowbuf16 = vreinterpretq_u16_u8( buff_wide.val[0] );
  2141. uint16x8_t hibuf16 = vreinterpretq_u16_u8( buff_wide.val[1] );
  2142. uint16x8_t low_eqmask = vceqq_u16( lowbuf16, vmin );
  2143. uint16x8_t hi_eqmask = vceqq_u16( hibuf16, vmin );
  2144. static const uint16_t mask_lsb[] = {
  2145. 0x1, 0x2, 0x4, 0x8,
  2146. 0x10, 0x20, 0x40, 0x80 };
  2147. static const uint16_t mask_msb[] = {
  2148. 0x100, 0x200, 0x400, 0x800,
  2149. 0x1000, 0x2000, 0x4000, 0x8000 };
  2150. uint16x8_t vmask_lsb = vld1q_u16( mask_lsb );
  2151. uint16x8_t vmask_msb = vld1q_u16( mask_msb );
  2152. uint16x8_t pos_lsb = vandq_u16( vmask_lsb, low_eqmask );
  2153. uint16x8_t pos_msb = vandq_u16( vmask_msb, hi_eqmask );
  2154. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2155. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2156. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2157. uint64_t idx_lane1 = vgetq_lane_u64( vreinterpretq_u64_u16( pos_lsb ), 0 );
  2158. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2159. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2160. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2161. uint32_t idx_lane2 = vgetq_lane_u32( vreinterpretq_u32_u16( pos_msb ), 0 );
  2162. idx = idx_lane1 != 0 ? __builtin_ctz( idx_lane1 ) : __builtin_ctz( idx_lane2 );
  2163. return min;
  2164. }
  2165. #endif
  2166. // During search it is not convenient to store the bits the way they are stored in the
  2167. // file format. Hence, after search, it is converted to this format.
  2168. // NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
  2169. static inline void stuff59bits( unsigned int thumbT59W1, unsigned int thumbT59W2, unsigned int& thumbTW1, unsigned int& thumbTW2 )
  2170. {
  2171. // Put bits in twotimer configuration for 59 (red overflows)
  2172. //
  2173. // Go from this bit layout:
  2174. //
  2175. // |63 62 61 60 59|58 57 56 55|54 53 52 51|50 49 48 47|46 45 44 43|42 41 40 39|38 37 36 35|34 33 32|
  2176. // |----empty-----|---red 0---|--green 0--|--blue 0---|---red 1---|--green 1--|--blue 1---|--dist--|
  2177. //
  2178. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  2179. // |----------------------------------------index bits---------------------------------------------|
  2180. //
  2181. //
  2182. // To this:
  2183. //
  2184. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2185. // -----------------------------------------------------------------------------------------------
  2186. // |// // //|R0a |//|R0b |G0 |B0 |R1 |G1 |B1 |da |df|db|
  2187. // -----------------------------------------------------------------------------------------------
  2188. //
  2189. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  2190. // |----------------------------------------index bits---------------------------------------------|
  2191. //
  2192. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2193. // -----------------------------------------------------------------------------------------------
  2194. // | base col1 | dcol 2 | base col1 | dcol 2 | base col 1 | dcol 2 | table | table |df|fp|
  2195. // | R1' (5 bits) | dR2 | G1' (5 bits) | dG2 | B1' (5 bits) | dB2 | cw 1 | cw 2 |bt|bt|
  2196. // ------------------------------------------------------------------------------------------------
  2197. uint8_t R0a;
  2198. uint8_t bit, a, b, c, d, bits;
  2199. R0a = ( thumbT59W1 >> 25 ) & 0x3;
  2200. // Fix middle part
  2201. thumbTW1 = thumbT59W1 << 1;
  2202. // Fix R0a (top two bits of R0)
  2203. thumbTW1 = ( thumbTW1 & ~( 0x3 << 27 ) ) | ( ( R0a & 0x3 ) << 27 );
  2204. // Fix db (lowest bit of d)
  2205. thumbTW1 = ( thumbTW1 & ~0x1 ) | ( thumbT59W1 & 0x1 );
  2206. // Make sure that red overflows:
  2207. a = ( thumbTW1 >> 28 ) & 0x1;
  2208. b = ( thumbTW1 >> 27 ) & 0x1;
  2209. c = ( thumbTW1 >> 25 ) & 0x1;
  2210. d = ( thumbTW1 >> 24 ) & 0x1;
2211. // The following abcd bit sequences should be padded with ones: 0111, 1010, 1011, 1101, 1110, 1111
  2212. // The following logical expression checks for the presence of any of those:
  2213. bit = ( a & c ) | ( !a & b & c & d ) | ( a & b & !c & d );
  2214. bits = 0xf * bit;
  2215. thumbTW1 = ( thumbTW1 & ~( 0x7 << 29 ) ) | ( bits & 0x7 ) << 29;
  2216. thumbTW1 = ( thumbTW1 & ~( 0x1 << 26 ) ) | ( !bit & 0x1 ) << 26;
  2217. // Set diffbit
  2218. thumbTW1 = ( thumbTW1 & ~0x2 ) | 0x2;
  2219. thumbTW2 = thumbT59W2;
  2220. }
  2221. // During search it is not convenient to store the bits the way they are stored in the
  2222. // file format. Hence, after search, it is converted to this format.
  2223. // NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
  2224. static inline void stuff58bits( unsigned int thumbH58W1, unsigned int thumbH58W2, unsigned int& thumbHW1, unsigned int& thumbHW2 )
  2225. {
  2226. // Put bits in twotimer configuration for 58 (red doesn't overflow, green does)
  2227. //
  2228. // Go from this bit layout:
  2229. //
  2230. //
  2231. // |63 62 61 60 59 58|57 56 55 54|53 52 51 50|49 48 47 46|45 44 43 42|41 40 39 38|37 36 35 34|33 32|
  2232. // |-------empty-----|---red 0---|--green 0--|--blue 0---|---red 1---|--green 1--|--blue 1---|d2 d1|
  2233. //
  2234. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  2235. // |---------------------------------------index bits----------------------------------------------|
  2236. //
  2237. // To this:
  2238. //
  2239. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2240. // -----------------------------------------------------------------------------------------------
2241. // |//|R0 |G0 |// // //|G0|B0|//|B0b |R1 |G1 |B1 |d2|df|d1|
  2242. // -----------------------------------------------------------------------------------------------
  2243. //
  2244. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  2245. // |---------------------------------------index bits----------------------------------------------|
  2246. //
  2247. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2248. // -----------------------------------------------------------------------------------------------
  2249. // | base col1 | dcol 2 | base col1 | dcol 2 | base col 1 | dcol 2 | table | table |df|fp|
  2250. // | R1' (5 bits) | dR2 | G1' (5 bits) | dG2 | B1' (5 bits) | dB2 | cw 1 | cw 2 |bt|bt|
  2251. // -----------------------------------------------------------------------------------------------
  2252. //
  2253. //
  2254. // Thus, what we are really doing is going from this bit layout:
  2255. //
  2256. //
  2257. // |63 62 61 60 59 58|57 56 55 54 53 52 51|50 49|48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33|32 |
  2258. // |-------empty-----|part0---------------|part1|part2------------------------------------------|part3|
  2259. //
  2260. // To this:
  2261. //
  2262. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2263. // --------------------------------------------------------------------------------------------------|
  2264. // |//|part0 |// // //|part1|//|part2 |df|part3|
  2265. // --------------------------------------------------------------------------------------------------|
  2266. unsigned int part0, part1, part2, part3;
  2267. uint8_t bit, a, b, c, d, bits;
  2268. // move parts
  2269. part0 = ( thumbH58W1 >> 19 ) & 0x7f;
  2270. part1 = ( thumbH58W1 >> 17 ) & 0x3;
  2271. part2 = ( thumbH58W1 >> 1 ) & 0xffff;
  2272. part3 = thumbH58W1 & 0x1;
  2273. thumbHW1 = 0;
  2274. thumbHW1 = ( thumbHW1 & ~( 0x7f << 24 ) ) | ( ( part0 & 0x7f ) << 24 );
  2275. thumbHW1 = ( thumbHW1 & ~( 0x3 << 19 ) ) | ( ( part1 & 0x3 ) << 19 );
  2276. thumbHW1 = ( thumbHW1 & ~( 0xffff << 2 ) ) | ( ( part2 & 0xffff ) << 2 );
  2277. thumbHW1 = ( thumbHW1 & ~0x1 ) | ( part3 & 0x1 );
  2278. // Make sure that red does not overflow:
  2279. bit = ( thumbHW1 >> 30 ) & 0x1;
  2280. thumbHW1 = ( thumbHW1 & ~( 0x1 << 31 ) ) | ( ( !bit & 0x1 ) << 31 );
  2281. // Make sure that green overflows:
  2282. a = ( thumbHW1 >> 20 ) & 0x1;
  2283. b = ( thumbHW1 >> 19 ) & 0x1;
  2284. c = ( thumbHW1 >> 17 ) & 0x1;
  2285. d = ( thumbHW1 >> 16 ) & 0x1;
2286. // The following abcd bit sequences should be padded with ones: 0111, 1010, 1011, 1101, 1110, 1111
  2287. // The following logical expression checks for the presence of any of those:
  2288. bit = ( a & c ) | ( !a & b & c & d ) | ( a & b & !c & d );
  2289. bits = 0xf * bit;
  2290. thumbHW1 = ( thumbHW1 & ~( 0x7 << 21 ) ) | ( ( bits & 0x7 ) << 21 );
  2291. thumbHW1 = ( thumbHW1 & ~( 0x1 << 18 ) ) | ( ( !bit & 0x1 ) << 18 );
  2292. // Set diffbit
  2293. thumbHW1 = ( thumbHW1 & ~0x2 ) | 0x2;
  2294. thumbHW2 = thumbH58W2;
  2295. }
  2296. #if defined __AVX2__ || (defined __ARM_NEON && defined __aarch64__)
  2297. static etcpak_force_inline Channels GetChannels( const uint8_t* src )
  2298. {
  2299. Channels ch;
  2300. #ifdef __AVX2__
  2301. __m128i d0 = _mm_loadu_si128( ( (__m128i*)src ) + 0 );
  2302. __m128i d1 = _mm_loadu_si128( ( (__m128i*)src ) + 1 );
  2303. __m128i d2 = _mm_loadu_si128( ( (__m128i*)src ) + 2 );
  2304. __m128i d3 = _mm_loadu_si128( ( (__m128i*)src ) + 3 );
  2305. __m128i rgb0 = _mm_shuffle_epi8( d0, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
  2306. __m128i rgb1 = _mm_shuffle_epi8( d1, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
  2307. __m128i rgb2 = _mm_shuffle_epi8( d2, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
  2308. __m128i rgb3 = _mm_shuffle_epi8( d3, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
  2309. __m128i rg0 = _mm_unpacklo_epi32( rgb0, rgb1 );
  2310. __m128i rg1 = _mm_unpacklo_epi32( rgb2, rgb3 );
  2311. __m128i b0 = _mm_unpackhi_epi32( rgb0, rgb1 );
  2312. __m128i b1 = _mm_unpackhi_epi32( rgb2, rgb3 );
  2313. // swap channels
  2314. ch.b8 = _mm_unpacklo_epi64( rg0, rg1 );
  2315. ch.g8 = _mm_unpackhi_epi64( rg0, rg1 );
  2316. ch.r8 = _mm_unpacklo_epi64( b0, b1 );
  2317. #elif defined __ARM_NEON && defined __aarch64__
  2318. //load pixel data into 4 rows
  2319. uint8x16_t px0 = vld1q_u8( src + 0 );
  2320. uint8x16_t px1 = vld1q_u8( src + 16 );
  2321. uint8x16_t px2 = vld1q_u8( src + 32 );
  2322. uint8x16_t px3 = vld1q_u8( src + 48 );
  2323. uint8x16x2_t px0z1 = vzipq_u8( px0, px1 );
  2324. uint8x16x2_t px2z3 = vzipq_u8( px2, px3 );
  2325. uint8x16x2_t px01 = vzipq_u8( px0z1.val[0], px0z1.val[1] );
  2326. uint8x16x2_t rgb01 = vzipq_u8( px01.val[0], px01.val[1] );
  2327. uint8x16x2_t px23 = vzipq_u8( px2z3.val[0], px2z3.val[1] );
  2328. uint8x16x2_t rgb23 = vzipq_u8( px23.val[0], px23.val[1] );
  2329. uint8x16_t rr = vreinterpretq_u8_u64( vzip1q_u64( vreinterpretq_u64_u8( rgb01.val[0] ), vreinterpretq_u64_u8( rgb23.val[0] ) ) );
  2330. uint8x16_t gg = vreinterpretq_u8_u64( vzip2q_u64( vreinterpretq_u64_u8( rgb01.val[0] ), vreinterpretq_u64_u8( rgb23.val[0] ) ) );
  2331. uint8x16_t bb = vreinterpretq_u8_u64( vzip1q_u64( vreinterpretq_u64_u8( rgb01.val[1] ), vreinterpretq_u64_u8( rgb23.val[1] ) ) );
  2332. uint8x16x2_t red = vzipq_u8( rr, uint8x16_t() );
  2333. uint8x16x2_t grn = vzipq_u8( gg, uint8x16_t() );
  2334. uint8x16x2_t blu = vzipq_u8( bb, uint8x16_t() );
  2335. ch.r = red;
  2336. ch.b = blu;
  2337. ch.g = grn;
  2338. #endif
  2339. return ch;
  2340. }
  2341. #endif
  2342. #if defined __AVX2__ || (defined __ARM_NEON && defined __aarch64__)
  2343. static etcpak_force_inline void CalculateLuma( Channels& ch, Luma& luma )
  2344. #else
  2345. static etcpak_force_inline void CalculateLuma( const uint8_t* src, Luma& luma )
  2346. #endif
  2347. {
  2348. #ifdef __AVX2__
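// the three channel weights below sum to 128, so the >>7 shift keeps the weighted luma in the 0..255 range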
  2349. __m256i b16_luma = _mm256_mullo_epi16( _mm256_cvtepu8_epi16( ch.b8 ), _mm256_set1_epi16( 14 ) );
  2350. __m256i g16_luma = _mm256_mullo_epi16( _mm256_cvtepu8_epi16( ch.g8 ), _mm256_set1_epi16( 76 ) );
  2351. __m256i r16_luma = _mm256_mullo_epi16( _mm256_cvtepu8_epi16( ch.r8 ), _mm256_set1_epi16( 38 ) );
  2352. __m256i luma_16bit = _mm256_add_epi16( _mm256_add_epi16( g16_luma, r16_luma ), b16_luma );
  2353. __m256i luma_8bit_m256i = _mm256_srli_epi16( luma_16bit, 7 );
  2354. __m128i luma_8bit_lo = _mm256_extractf128_si256( luma_8bit_m256i, 0 );
  2355. __m128i luma_8bit_hi = _mm256_extractf128_si256( luma_8bit_m256i, 1 );
  2356. static const __m128i interleaving_mask_lo = _mm_set_epi8( 15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0 );
  2357. static const __m128i interleaving_mask_hi = _mm_set_epi8( 14, 12, 10, 8, 6, 4, 2, 0, 15, 13, 11, 9, 7, 5, 3, 1 );
  2358. __m128i luma_8bit_lo_moved = _mm_shuffle_epi8( luma_8bit_lo, interleaving_mask_lo );
  2359. __m128i luma_8bit_hi_moved = _mm_shuffle_epi8( luma_8bit_hi, interleaving_mask_hi );
  2360. __m128i luma_8bit = _mm_or_si128( luma_8bit_hi_moved, luma_8bit_lo_moved );
  2361. luma.luma8 = luma_8bit;
  2362. // min/max calculation
  2363. luma.min = hMin( luma_8bit, luma.minIdx ) * 0.00392156f;
  2364. luma.max = hMax( luma_8bit, luma.maxIdx ) * 0.00392156f;
  2365. #elif defined __ARM_NEON && defined __aarch64__
2366. //apply the per-channel luma weights to rows 0,1 and 2,3
  2367. uint16x8_t red0 = vmulq_n_u16( vreinterpretq_u16_u8( ch.r.val[0] ), 14 );
  2368. uint16x8_t red1 = vmulq_n_u16( vreinterpretq_u16_u8( ch.r.val[1] ), 14 );
  2369. uint16x8_t grn0 = vmulq_n_u16( vreinterpretq_u16_u8( ch.g.val[0] ), 76 );
  2370. uint16x8_t grn1 = vmulq_n_u16( vreinterpretq_u16_u8( ch.g.val[1] ), 76 );
  2371. uint16x8_t blu0 = vmulq_n_u16( vreinterpretq_u16_u8( ch.b.val[0] ), 38 );
  2372. uint16x8_t blu1 = vmulq_n_u16( vreinterpretq_u16_u8( ch.b.val[1] ), 38 );
  2373. //calculate luma for rows 0,1 and 2,3
  2374. uint16x8_t lum_r01 = vaddq_u16( vaddq_u16( red0, grn0 ), blu0 );
  2375. uint16x8_t lum_r23 = vaddq_u16( vaddq_u16( red1, grn1 ), blu1 );
  2376. //divide luma values with right shift and narrow results to 8bit
  2377. uint8x8_t lum_r01_d = vshrn_n_u16( lum_r01, 7 );
  2378. uint8x8_t lum_r02_d = vshrn_n_u16( lum_r23, 7 );
  2379. luma.luma8 = vcombine_u8( lum_r01_d, lum_r02_d );
  2380. //find min and max luma value
  2381. luma.min = hMin( luma.luma8, luma.minIdx ) * 0.00392156f;
  2382. luma.max = hMax( luma.luma8, luma.maxIdx ) * 0.00392156f;
  2383. #else
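// scalar path: the weights 76/150/28 sum to 254, so the division keeps luma in the 0..255 range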
  2384. for( int i = 0; i < 16; ++i )
  2385. {
  2386. luma.val[i] = ( src[i * 4 + 2] * 76 + src[i * 4 + 1] * 150 + src[i * 4] * 28 ) / 254; // luma calculation
  2387. if( luma.min > luma.val[i] )
  2388. {
  2389. luma.min = luma.val[i];
  2390. luma.minIdx = i;
  2391. }
  2392. if( luma.max < luma.val[i] )
  2393. {
  2394. luma.max = luma.val[i];
  2395. luma.maxIdx = i;
  2396. }
  2397. }
  2398. #endif
  2399. }
  2400. static etcpak_force_inline uint8_t SelectModeETC2( const Luma& luma )
  2401. {
  2402. #if defined __AVX2__ || defined __ARM_NEON
  2403. const float lumaRange = ( luma.max - luma.min );
  2404. #else
  2405. const float lumaRange = ( luma.max - luma.min ) * ( 1.f / 255.f );
  2406. #endif
  2407. // filters a very-low-contrast block
  2408. if( lumaRange <= ecmd_threshold[0] )
  2409. {
  2410. return ModePlanar;
  2411. }
  2412. // checks whether a pair of the corner pixels in a block has the min/max luma values;
  2413. // if so, the ETC2 planar mode is enabled, and otherwise, the ETC1 mode is enabled
  2414. else if( lumaRange <= ecmd_threshold[1] )
  2415. {
  2416. #ifdef __AVX2__
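// corner_pair packs the four diagonal corner index pairings (0,15), (15,0), (3,12), (12,3) into 16-bit lanes,
// so a single 16-bit compare against the block's (minIdx, maxIdx) pair tests all four at once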
  2417. static const __m128i corner_pair = _mm_set_epi8( 1, 1, 1, 1, 1, 1, 1, 1, 0, 15, 3, 12, 12, 3, 15, 0 );
  2418. __m128i current_max_min = _mm_set_epi8( 0, 0, 0, 0, 0, 0, 0, 0, luma.minIdx, luma.maxIdx, luma.minIdx, luma.maxIdx, luma.minIdx, luma.maxIdx, luma.minIdx, luma.maxIdx );
  2419. __m128i max_min_result = _mm_cmpeq_epi16( corner_pair, current_max_min );
  2420. int mask = _mm_movemask_epi8( max_min_result );
  2421. if( mask )
  2422. {
  2423. return ModePlanar;
  2424. }
  2425. #else
  2426. // check whether a pair of the corner pixels in a block has the min/max luma values;
  2427. // if so, the ETC2 planar mode is enabled.
  2428. if( ( luma.minIdx == 0 && luma.maxIdx == 15 ) ||
  2429. ( luma.minIdx == 15 && luma.maxIdx == 0 ) ||
  2430. ( luma.minIdx == 3 && luma.maxIdx == 12 ) ||
  2431. ( luma.minIdx == 12 && luma.maxIdx == 3 ) )
  2432. {
  2433. return ModePlanar;
  2434. }
  2435. #endif
  2436. }
  2437. // filters a high-contrast block for checking both ETC1 mode and the ETC2 T/H mode
  2438. else if( lumaRange >= ecmd_threshold[2] )
  2439. {
  2440. return ModeTH;
  2441. }
  2442. return ModeUndecided;
  2443. }
  2444. static etcpak_force_inline uint64_t ProcessRGB_ETC2( const uint8_t* src, bool useHeuristics )
  2445. {
  2446. #ifdef __AVX2__
  2447. uint64_t d = CheckSolid_AVX2( src );
  2448. if( d != 0 ) return d;
  2449. #else
  2450. uint64_t d = CheckSolid( src );
  2451. if (d != 0) return d;
  2452. #endif
  2453. uint8_t mode = ModeUndecided;
  2454. Luma luma;
  2455. #ifdef __AVX2__
  2456. Channels ch = GetChannels( src );
  2457. if( useHeuristics )
  2458. {
  2459. CalculateLuma( ch, luma );
  2460. mode = SelectModeETC2( luma );
  2461. }
  2462. auto plane = Planar_AVX2( ch, mode, useHeuristics );
  2463. if( useHeuristics && mode == ModePlanar ) return plane.plane;
  2464. alignas( 32 ) v4i a[8];
  2465. __m128i err0 = PrepareAverages_AVX2( a, plane.sum4 );
  2466. // Get index of minimum error (err0)
  2467. __m128i err1 = _mm_shuffle_epi32( err0, _MM_SHUFFLE( 2, 3, 0, 1 ) );
  2468. __m128i errMin0 = _mm_min_epu32(err0, err1);
  2469. __m128i errMin1 = _mm_shuffle_epi32( errMin0, _MM_SHUFFLE( 1, 0, 3, 2 ) );
  2470. __m128i errMin2 = _mm_min_epu32( errMin1, errMin0 );
  2471. __m128i errMask = _mm_cmpeq_epi32( errMin2, err0 );
  2472. uint32_t mask = _mm_movemask_epi8( errMask );
  2473. size_t idx = _bit_scan_forward( mask ) >> 2;
  2474. d = EncodeAverages_AVX2( a, idx );
  2475. alignas(32) uint32_t terr[2][8] = {};
  2476. alignas(32) uint32_t tsel[8];
  2477. if ((idx == 0) || (idx == 2))
  2478. {
  2479. FindBestFit_4x2_AVX2( terr, tsel, a, idx * 2, src );
  2480. }
  2481. else
  2482. {
  2483. FindBestFit_2x4_AVX2( terr, tsel, a, idx * 2, src );
  2484. }
  2485. if( useHeuristics )
  2486. {
  2487. if( mode == ModeTH )
  2488. {
  2489. uint64_t result = 0;
  2490. uint64_t error = 0;
  2491. uint32_t compressed[4] = { 0, 0, 0, 0 };
  2492. bool tMode = false;
  2493. error = compressBlockTH( (uint8_t*)src, luma, compressed[0], compressed[1], tMode, ch.r8, ch.g8, ch.b8 );
  2494. if( tMode )
  2495. {
  2496. stuff59bits( compressed[0], compressed[1], compressed[2], compressed[3] );
  2497. }
  2498. else
  2499. {
  2500. stuff58bits( compressed[0], compressed[1], compressed[2], compressed[3] );
  2501. }
  2502. result = (uint32_t)_bswap( compressed[2] );
  2503. result |= static_cast<uint64_t>( _bswap( compressed[3] ) ) << 32;
  2504. plane.plane = result;
  2505. plane.error = error;
  2506. }
  2507. else
  2508. {
  2509. plane.plane = 0;
  2510. plane.error = MaxError;
  2511. }
  2512. }
  2513. return EncodeSelectors_AVX2( d, terr, tsel, ( idx % 2 ) == 1, plane.plane, plane.error );
  2514. #else
  2515. if( useHeuristics )
  2516. {
  2517. #if defined __ARM_NEON && defined __aarch64__
  2518. Channels ch = GetChannels( src );
  2519. CalculateLuma( ch, luma );
  2520. #else
  2521. CalculateLuma( src, luma );
  2522. #endif
  2523. mode = SelectModeETC2( luma );
  2524. }
  2525. #ifdef __ARM_NEON
  2526. auto result = Planar_NEON( src, mode, useHeuristics );
  2527. #else
  2528. auto result = Planar( src, mode, useHeuristics );
  2529. #endif
  2530. if( result.second == 0 ) return result.first;
  2531. v4i a[8];
  2532. unsigned int err[4] = {};
  2533. PrepareAverages( a, src, err );
  2534. size_t idx = GetLeastError( err, 4 );
  2535. EncodeAverages( d, a, idx );
  2536. #if ( defined __SSE4_1__ || defined __ARM_NEON ) && !defined REFERENCE_IMPLEMENTATION
  2537. uint32_t terr[2][8] = {};
  2538. #else
  2539. uint64_t terr[2][8] = {};
  2540. #endif
  2541. uint16_t tsel[16][8];
  2542. auto id = g_id[idx];
  2543. FindBestFit( terr, tsel, a, id, src );
  2544. if( useHeuristics )
  2545. {
  2546. if( mode == ModeTH )
  2547. {
  2548. uint32_t compressed[4] = { 0, 0, 0, 0 };
  2549. bool tMode = false;
  2550. result.second = compressBlockTH( (uint8_t*)src, luma, compressed[0], compressed[1], tMode );
  2551. if( tMode )
  2552. {
  2553. stuff59bits( compressed[0], compressed[1], compressed[2], compressed[3] );
  2554. }
  2555. else
  2556. {
  2557. stuff58bits( compressed[0], compressed[1], compressed[2], compressed[3] );
  2558. }
  2559. result.first = (uint32_t)_bswap( compressed[2] );
  2560. result.first |= static_cast<uint64_t>( _bswap( compressed[3] ) ) << 32;
  2561. }
  2562. else
  2563. {
  2564. result.first = 0;
  2565. result.second = MaxError;
  2566. }
  2567. }
  2568. return EncodeSelectors( d, terr, tsel, id, result.first, result.second );
  2569. #endif
  2570. }
  2571. #ifdef __SSE4_1__
  2572. template<int K>
  2573. static etcpak_force_inline __m128i Widen( const __m128i src )
  2574. {
  2575. static_assert( K >= 0 && K <= 7, "Index out of range" );
  2576. __m128i tmp;
  2577. switch( K )
  2578. {
  2579. case 0:
  2580. tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2581. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2582. case 1:
  2583. tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 1, 1, 1, 1 ) );
  2584. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2585. case 2:
  2586. tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2587. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2588. case 3:
  2589. tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 3, 3, 3, 3 ) );
  2590. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2591. case 4:
  2592. tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2593. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2594. case 5:
  2595. tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 1, 1, 1, 1 ) );
  2596. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2597. case 6:
  2598. tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2599. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2600. case 7:
  2601. tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 3, 3, 3, 3 ) );
  2602. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2603. }
  2604. }
  2605. static etcpak_force_inline int GetMulSel( int sel )
  2606. {
  2607. switch( sel )
  2608. {
  2609. case 0:
  2610. return 0;
  2611. case 1:
  2612. case 2:
  2613. case 3:
  2614. return 1;
  2615. case 4:
  2616. return 2;
  2617. case 5:
  2618. case 6:
  2619. case 7:
  2620. return 3;
  2621. case 8:
  2622. case 9:
  2623. case 10:
  2624. case 11:
  2625. case 12:
  2626. case 13:
  2627. return 4;
  2628. case 14:
  2629. case 15:
  2630. return 5;
  2631. }
  2632. }
  2633. #endif
  2634. #ifdef __ARM_NEON
  2635. static constexpr etcpak_force_inline int GetMulSel(int sel)
  2636. {
  2637. return ( sel < 1 ) ? 0 : ( sel < 4 ) ? 1 : ( sel < 5 ) ? 2 : ( sel < 8 ) ? 3 : ( sel < 14 ) ? 4 : 5;
  2638. }
  2639. static constexpr int ClampConstant( int x, int min, int max )
  2640. {
  2641. return x < min ? min : x > max ? max : x;
  2642. }
  2643. template <int Index>
  2644. etcpak_force_inline static uint16x8_t ErrorProbe_EAC_NEON( uint8x8_t recVal, uint8x16_t alphaBlock )
  2645. {
  2646. uint8x8_t srcValWide;
  2647. #ifndef __aarch64__
  2648. if( Index < 8 )
  2649. srcValWide = vdup_lane_u8( vget_low_u8( alphaBlock ), ClampConstant( Index, 0, 7 ) );
  2650. else
  2651. srcValWide = vdup_lane_u8( vget_high_u8( alphaBlock ), ClampConstant( Index - 8, 0, 7 ) );
  2652. #else
  2653. srcValWide = vdup_laneq_u8( alphaBlock, Index );
  2654. #endif
  2655. uint8x8_t deltaVal = vabd_u8( srcValWide, recVal );
  2656. return vmull_u8( deltaVal, deltaVal );
  2657. }
  2658. etcpak_force_inline static uint16_t MinError_EAC_NEON( uint16x8_t errProbe )
  2659. {
  2660. #ifndef __aarch64__
  2661. uint16x4_t tmpErr = vpmin_u16( vget_low_u16( errProbe ), vget_high_u16( errProbe ) );
  2662. tmpErr = vpmin_u16( tmpErr, tmpErr );
  2663. return vpmin_u16( tmpErr, tmpErr )[0];
  2664. #else
  2665. return vminvq_u16( errProbe );
  2666. #endif
  2667. }
  2668. template <int Index>
  2669. etcpak_force_inline static uint64_t MinErrorIndex_EAC_NEON( uint8x8_t recVal, uint8x16_t alphaBlock )
  2670. {
  2671. uint16x8_t errProbe = ErrorProbe_EAC_NEON<Index>( recVal, alphaBlock );
  2672. uint16x8_t minErrMask = vceqq_u16( errProbe, vdupq_n_u16( MinError_EAC_NEON( errProbe ) ) );
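// ctz of the narrowed equality mask gives the first matching byte position; >>3 converts it to a lane (selector) index,
// and the final shift packs the 3-bit selectors MSB-first, with pixel 0 landing in bits 45..47 (e.g. selector 5 for pixel 0 becomes 5 << 45)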
  2673. uint64_t idx = __builtin_ctzll( vget_lane_u64( vreinterpret_u64_u8( vqmovn_u16( minErrMask ) ), 0 ) );
  2674. idx >>= 3;
  2675. idx <<= 45 - Index * 3;
  2676. return idx;
  2677. }
  2678. template <int Index>
  2679. etcpak_force_inline static int16x8_t WidenMultiplier_EAC_NEON( int16x8_t multipliers )
  2680. {
  2681. constexpr int Lane = GetMulSel( Index );
  2682. #ifndef __aarch64__
  2683. if( Lane < 4 )
  2684. return vdupq_lane_s16( vget_low_s16( multipliers ), ClampConstant( Lane, 0, 3 ) );
  2685. else
  2686. return vdupq_lane_s16( vget_high_s16( multipliers ), ClampConstant( Lane - 4, 0, 3 ) );
  2687. #else
  2688. return vdupq_laneq_s16( multipliers, Lane );
  2689. #endif
  2690. }
  2691. #endif
  2692. template<bool checkSolid = true>
  2693. static etcpak_force_inline uint64_t ProcessAlpha_ETC2( const uint8_t* src )
  2694. {
  2695. #if defined __SSE4_1__
  2696. __m128i s = _mm_loadu_si128( (__m128i*)src );
  2697. if( checkSolid )
  2698. {
  2699. // Check solid
  2700. __m128i solidCmp = _mm_set1_epi8( src[0] );
  2701. __m128i cmpRes = _mm_cmpeq_epi8( s, solidCmp );
  2702. if( _mm_testc_si128( cmpRes, _mm_set1_epi32( -1 ) ) )
  2703. {
  2704. return src[0];
  2705. }
  2706. }
  2707. // Calculate min, max
  2708. __m128i s1 = _mm_shuffle_epi32( s, _MM_SHUFFLE( 2, 3, 0, 1 ) );
  2709. __m128i max1 = _mm_max_epu8( s, s1 );
  2710. __m128i min1 = _mm_min_epu8( s, s1 );
  2711. __m128i smax2 = _mm_shuffle_epi32( max1, _MM_SHUFFLE( 0, 0, 2, 2 ) );
  2712. __m128i smin2 = _mm_shuffle_epi32( min1, _MM_SHUFFLE( 0, 0, 2, 2 ) );
  2713. __m128i max2 = _mm_max_epu8( max1, smax2 );
  2714. __m128i min2 = _mm_min_epu8( min1, smin2 );
  2715. __m128i smax3 = _mm_alignr_epi8( max2, max2, 2 );
  2716. __m128i smin3 = _mm_alignr_epi8( min2, min2, 2 );
  2717. __m128i max3 = _mm_max_epu8( max2, smax3 );
  2718. __m128i min3 = _mm_min_epu8( min2, smin3 );
  2719. __m128i smax4 = _mm_alignr_epi8( max3, max3, 1 );
  2720. __m128i smin4 = _mm_alignr_epi8( min3, min3, 1 );
  2721. __m128i max = _mm_max_epu8( max3, smax4 );
  2722. __m128i min = _mm_min_epu8( min3, smin4 );
  2723. __m128i max16 = _mm_unpacklo_epi8( max, _mm_setzero_si128() );
  2724. __m128i min16 = _mm_unpacklo_epi8( min, _mm_setzero_si128() );
  2725. // src range, mid
  2726. __m128i srcRange = _mm_sub_epi16( max16, min16 );
  2727. __m128i srcRangeHalf = _mm_srli_epi16( srcRange, 1 );
  2728. __m128i srcMid = _mm_add_epi16( min16, srcRangeHalf );
  2729. // multiplier
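// _mm_mulhi_epi16 yields (srcRange * g_alphaRange_SIMD) >> 16 per lane; the +1 presumably keeps each multiplier at least 1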
  2730. __m128i mul1 = _mm_mulhi_epi16( srcRange, g_alphaRange_SIMD );
  2731. __m128i mul = _mm_add_epi16( mul1, _mm_set1_epi16( 1 ) );
  2732. // wide source
  2733. __m128i s16_1 = _mm_shuffle_epi32( s, _MM_SHUFFLE( 3, 2, 3, 2 ) );
  2734. __m128i s16[2] = { _mm_unpacklo_epi8( s, _mm_setzero_si128() ), _mm_unpacklo_epi8( s16_1, _mm_setzero_si128() ) };
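// sr[i] broadcasts the i-th source alpha value across all eight 16-bit lanes so it can be compared against eight reconstructed values at once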
  2735. __m128i sr[16] = {
  2736. Widen<0>( s16[0] ),
  2737. Widen<1>( s16[0] ),
  2738. Widen<2>( s16[0] ),
  2739. Widen<3>( s16[0] ),
  2740. Widen<4>( s16[0] ),
  2741. Widen<5>( s16[0] ),
  2742. Widen<6>( s16[0] ),
  2743. Widen<7>( s16[0] ),
  2744. Widen<0>( s16[1] ),
  2745. Widen<1>( s16[1] ),
  2746. Widen<2>( s16[1] ),
  2747. Widen<3>( s16[1] ),
  2748. Widen<4>( s16[1] ),
  2749. Widen<5>( s16[1] ),
  2750. Widen<6>( s16[1] ),
  2751. Widen<7>( s16[1] )
  2752. };
  2753. #ifdef __AVX2__
  2754. __m256i srcRangeWide = _mm256_broadcastsi128_si256( srcRange );
  2755. __m256i srcMidWide = _mm256_broadcastsi128_si256( srcMid );
  2756. __m256i mulWide1 = _mm256_mulhi_epi16( srcRangeWide, g_alphaRange_AVX );
  2757. __m256i mulWide = _mm256_add_epi16( mulWide1, _mm256_set1_epi16( 1 ) );
  2758. __m256i modMul[8] = {
  2759. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[0] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[0] ) ) ), _mm256_setzero_si256() ),
  2760. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[1] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[1] ) ) ), _mm256_setzero_si256() ),
  2761. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[2] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[2] ) ) ), _mm256_setzero_si256() ),
  2762. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[3] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[3] ) ) ), _mm256_setzero_si256() ),
  2763. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[4] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[4] ) ) ), _mm256_setzero_si256() ),
  2764. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[5] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[5] ) ) ), _mm256_setzero_si256() ),
  2765. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[6] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[6] ) ) ), _mm256_setzero_si256() ),
  2766. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[7] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[7] ) ) ), _mm256_setzero_si256() ),
  2767. };
  2768. // find selector
  2769. __m256i mulErr = _mm256_setzero_si256();
  2770. for( int j=0; j<16; j++ )
  2771. {
  2772. __m256i s16Wide = _mm256_broadcastsi128_si256( sr[j] );
  2773. __m256i err1, err2;
  2774. err1 = _mm256_sub_epi16( s16Wide, modMul[0] );
  2775. __m256i localErr = _mm256_mullo_epi16( err1, err1 );
  2776. err1 = _mm256_sub_epi16( s16Wide, modMul[1] );
  2777. err2 = _mm256_mullo_epi16( err1, err1 );
  2778. localErr = _mm256_min_epu16( localErr, err2 );
  2779. err1 = _mm256_sub_epi16( s16Wide, modMul[2] );
  2780. err2 = _mm256_mullo_epi16( err1, err1 );
  2781. localErr = _mm256_min_epu16( localErr, err2 );
  2782. err1 = _mm256_sub_epi16( s16Wide, modMul[3] );
  2783. err2 = _mm256_mullo_epi16( err1, err1 );
  2784. localErr = _mm256_min_epu16( localErr, err2 );
  2785. err1 = _mm256_sub_epi16( s16Wide, modMul[4] );
  2786. err2 = _mm256_mullo_epi16( err1, err1 );
  2787. localErr = _mm256_min_epu16( localErr, err2 );
  2788. err1 = _mm256_sub_epi16( s16Wide, modMul[5] );
  2789. err2 = _mm256_mullo_epi16( err1, err1 );
  2790. localErr = _mm256_min_epu16( localErr, err2 );
  2791. err1 = _mm256_sub_epi16( s16Wide, modMul[6] );
  2792. err2 = _mm256_mullo_epi16( err1, err1 );
  2793. localErr = _mm256_min_epu16( localErr, err2 );
  2794. err1 = _mm256_sub_epi16( s16Wide, modMul[7] );
  2795. err2 = _mm256_mullo_epi16( err1, err1 );
  2796. localErr = _mm256_min_epu16( localErr, err2 );
2797. // note that this accumulation saturates (adds_epu16) rather than wrapping, but since we're looking for the smallest error, it shouldn't matter
  2798. mulErr = _mm256_adds_epu16( mulErr, localErr );
  2799. }
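// _mm_minpos_epu16 returns the minimum in bits 0..15 and its lane index in bits 16..18;
// the low and high 128-bit halves cover selector candidates 0..7 and 8..15 respectively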
  2800. uint64_t minPos1 = _mm_cvtsi128_si64( _mm_minpos_epu16( _mm256_castsi256_si128( mulErr ) ) );
  2801. uint64_t minPos2 = _mm_cvtsi128_si64( _mm_minpos_epu16( _mm256_extracti128_si256( mulErr, 1 ) ) );
  2802. int sel = ( ( minPos1 & 0xFFFF ) < ( minPos2 & 0xFFFF ) ) ? ( minPos1 >> 16 ) : ( 8 + ( minPos2 >> 16 ) );
  2803. __m128i recVal16;
  2804. switch( sel )
  2805. {
  2806. case 0:
  2807. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ) ), _mm_setzero_si128() );
  2808. break;
  2809. case 1:
  2810. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ) ), _mm_setzero_si128() );
  2811. break;
  2812. case 2:
  2813. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ) ), _mm_setzero_si128() );
  2814. break;
  2815. case 3:
  2816. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ) ), _mm_setzero_si128() );
  2817. break;
  2818. case 4:
  2819. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ) ), _mm_setzero_si128() );
  2820. break;
  2821. case 5:
  2822. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ) ), _mm_setzero_si128() );
  2823. break;
  2824. case 6:
  2825. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ) ), _mm_setzero_si128() );
  2826. break;
  2827. case 7:
  2828. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ) ), _mm_setzero_si128() );
  2829. break;
  2830. case 8:
  2831. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ) ), _mm_setzero_si128() );
  2832. break;
  2833. case 9:
  2834. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ) ), _mm_setzero_si128() );
  2835. break;
  2836. case 10:
  2837. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ) ), _mm_setzero_si128() );
  2838. break;
  2839. case 11:
  2840. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ) ), _mm_setzero_si128() );
  2841. break;
  2842. case 12:
  2843. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ) ), _mm_setzero_si128() );
  2844. break;
  2845. case 13:
  2846. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ) ), _mm_setzero_si128() );
  2847. break;
  2848. case 14:
  2849. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ) ), _mm_setzero_si128() );
  2850. break;
  2851. case 15:
  2852. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ) ), _mm_setzero_si128() );
  2853. break;
  2854. default:
  2855. assert( false );
  2856. break;
  2857. }
  2858. #else
  2859. // wide multiplier
  2860. __m128i rangeMul[16] = {
  2861. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ) ), _mm_setzero_si128() ),
  2862. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ) ), _mm_setzero_si128() ),
  2863. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ) ), _mm_setzero_si128() ),
  2864. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ) ), _mm_setzero_si128() ),
  2865. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ) ), _mm_setzero_si128() ),
  2866. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ) ), _mm_setzero_si128() ),
  2867. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ) ), _mm_setzero_si128() ),
  2868. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ) ), _mm_setzero_si128() ),
  2869. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ) ), _mm_setzero_si128() ),
  2870. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ) ), _mm_setzero_si128() ),
  2871. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ) ), _mm_setzero_si128() ),
  2872. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ) ), _mm_setzero_si128() ),
  2873. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ) ), _mm_setzero_si128() ),
  2874. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ) ), _mm_setzero_si128() ),
  2875. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ) ), _mm_setzero_si128() ),
  2876. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ) ), _mm_setzero_si128() )
  2877. };
  2878. // find selector
  2879. int err = std::numeric_limits<int>::max();
  2880. int sel;
  2881. for( int r=0; r<16; r++ )
  2882. {
  2883. __m128i err1, err2, minerr;
  2884. __m128i recVal16 = rangeMul[r];
  2885. int rangeErr;
  2886. err1 = _mm_sub_epi16( sr[0], recVal16 );
  2887. err2 = _mm_mullo_epi16( err1, err1 );
  2888. minerr = _mm_minpos_epu16( err2 );
  2889. rangeErr = _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2890. err1 = _mm_sub_epi16( sr[1], recVal16 );
  2891. err2 = _mm_mullo_epi16( err1, err1 );
  2892. minerr = _mm_minpos_epu16( err2 );
  2893. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2894. err1 = _mm_sub_epi16( sr[2], recVal16 );
  2895. err2 = _mm_mullo_epi16( err1, err1 );
  2896. minerr = _mm_minpos_epu16( err2 );
  2897. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2898. err1 = _mm_sub_epi16( sr[3], recVal16 );
  2899. err2 = _mm_mullo_epi16( err1, err1 );
  2900. minerr = _mm_minpos_epu16( err2 );
  2901. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2902. err1 = _mm_sub_epi16( sr[4], recVal16 );
  2903. err2 = _mm_mullo_epi16( err1, err1 );
  2904. minerr = _mm_minpos_epu16( err2 );
  2905. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2906. err1 = _mm_sub_epi16( sr[5], recVal16 );
  2907. err2 = _mm_mullo_epi16( err1, err1 );
  2908. minerr = _mm_minpos_epu16( err2 );
  2909. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2910. err1 = _mm_sub_epi16( sr[6], recVal16 );
  2911. err2 = _mm_mullo_epi16( err1, err1 );
  2912. minerr = _mm_minpos_epu16( err2 );
  2913. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2914. err1 = _mm_sub_epi16( sr[7], recVal16 );
  2915. err2 = _mm_mullo_epi16( err1, err1 );
  2916. minerr = _mm_minpos_epu16( err2 );
  2917. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2918. err1 = _mm_sub_epi16( sr[8], recVal16 );
  2919. err2 = _mm_mullo_epi16( err1, err1 );
  2920. minerr = _mm_minpos_epu16( err2 );
  2921. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2922. err1 = _mm_sub_epi16( sr[9], recVal16 );
  2923. err2 = _mm_mullo_epi16( err1, err1 );
  2924. minerr = _mm_minpos_epu16( err2 );
  2925. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2926. err1 = _mm_sub_epi16( sr[10], recVal16 );
  2927. err2 = _mm_mullo_epi16( err1, err1 );
  2928. minerr = _mm_minpos_epu16( err2 );
  2929. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2930. err1 = _mm_sub_epi16( sr[11], recVal16 );
  2931. err2 = _mm_mullo_epi16( err1, err1 );
  2932. minerr = _mm_minpos_epu16( err2 );
  2933. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2934. err1 = _mm_sub_epi16( sr[12], recVal16 );
  2935. err2 = _mm_mullo_epi16( err1, err1 );
  2936. minerr = _mm_minpos_epu16( err2 );
  2937. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2938. err1 = _mm_sub_epi16( sr[13], recVal16 );
  2939. err2 = _mm_mullo_epi16( err1, err1 );
  2940. minerr = _mm_minpos_epu16( err2 );
  2941. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2942. err1 = _mm_sub_epi16( sr[14], recVal16 );
  2943. err2 = _mm_mullo_epi16( err1, err1 );
  2944. minerr = _mm_minpos_epu16( err2 );
  2945. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2946. err1 = _mm_sub_epi16( sr[15], recVal16 );
  2947. err2 = _mm_mullo_epi16( err1, err1 );
  2948. minerr = _mm_minpos_epu16( err2 );
  2949. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2950. if( rangeErr < err )
  2951. {
  2952. err = rangeErr;
  2953. sel = r;
  2954. if( err == 0 ) break;
  2955. }
  2956. }
  2957. __m128i recVal16 = rangeMul[sel];
  2958. #endif
  2959. // find indices
  2960. __m128i err1, err2, minerr;
  2961. uint64_t idx = 0, tmp;
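// each pixel's best 3-bit selector is the index field (bits 16..18) of _mm_minpos_epu16; indices are packed MSB-first, pixel 0 in bits 45..47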
  2962. err1 = _mm_sub_epi16( sr[0], recVal16 );
  2963. err2 = _mm_mullo_epi16( err1, err1 );
  2964. minerr = _mm_minpos_epu16( err2 );
  2965. tmp = _mm_cvtsi128_si64( minerr );
  2966. idx |= ( tmp >> 16 ) << 15*3;
  2967. err1 = _mm_sub_epi16( sr[1], recVal16 );
  2968. err2 = _mm_mullo_epi16( err1, err1 );
  2969. minerr = _mm_minpos_epu16( err2 );
  2970. tmp = _mm_cvtsi128_si64( minerr );
  2971. idx |= ( tmp >> 16 ) << 14*3;
  2972. err1 = _mm_sub_epi16( sr[2], recVal16 );
  2973. err2 = _mm_mullo_epi16( err1, err1 );
  2974. minerr = _mm_minpos_epu16( err2 );
  2975. tmp = _mm_cvtsi128_si64( minerr );
  2976. idx |= ( tmp >> 16 ) << 13*3;
  2977. err1 = _mm_sub_epi16( sr[3], recVal16 );
  2978. err2 = _mm_mullo_epi16( err1, err1 );
  2979. minerr = _mm_minpos_epu16( err2 );
  2980. tmp = _mm_cvtsi128_si64( minerr );
  2981. idx |= ( tmp >> 16 ) << 12*3;
  2982. err1 = _mm_sub_epi16( sr[4], recVal16 );
  2983. err2 = _mm_mullo_epi16( err1, err1 );
  2984. minerr = _mm_minpos_epu16( err2 );
  2985. tmp = _mm_cvtsi128_si64( minerr );
  2986. idx |= ( tmp >> 16 ) << 11*3;
  2987. err1 = _mm_sub_epi16( sr[5], recVal16 );
  2988. err2 = _mm_mullo_epi16( err1, err1 );
  2989. minerr = _mm_minpos_epu16( err2 );
  2990. tmp = _mm_cvtsi128_si64( minerr );
  2991. idx |= ( tmp >> 16 ) << 10*3;
  2992. err1 = _mm_sub_epi16( sr[6], recVal16 );
  2993. err2 = _mm_mullo_epi16( err1, err1 );
  2994. minerr = _mm_minpos_epu16( err2 );
  2995. tmp = _mm_cvtsi128_si64( minerr );
  2996. idx |= ( tmp >> 16 ) << 9*3;
  2997. err1 = _mm_sub_epi16( sr[7], recVal16 );
  2998. err2 = _mm_mullo_epi16( err1, err1 );
  2999. minerr = _mm_minpos_epu16( err2 );
  3000. tmp = _mm_cvtsi128_si64( minerr );
  3001. idx |= ( tmp >> 16 ) << 8*3;
  3002. err1 = _mm_sub_epi16( sr[8], recVal16 );
  3003. err2 = _mm_mullo_epi16( err1, err1 );
  3004. minerr = _mm_minpos_epu16( err2 );
  3005. tmp = _mm_cvtsi128_si64( minerr );
  3006. idx |= ( tmp >> 16 ) << 7*3;
  3007. err1 = _mm_sub_epi16( sr[9], recVal16 );
  3008. err2 = _mm_mullo_epi16( err1, err1 );
  3009. minerr = _mm_minpos_epu16( err2 );
  3010. tmp = _mm_cvtsi128_si64( minerr );
  3011. idx |= ( tmp >> 16 ) << 6*3;
  3012. err1 = _mm_sub_epi16( sr[10], recVal16 );
  3013. err2 = _mm_mullo_epi16( err1, err1 );
  3014. minerr = _mm_minpos_epu16( err2 );
  3015. tmp = _mm_cvtsi128_si64( minerr );
  3016. idx |= ( tmp >> 16 ) << 5*3;
  3017. err1 = _mm_sub_epi16( sr[11], recVal16 );
  3018. err2 = _mm_mullo_epi16( err1, err1 );
  3019. minerr = _mm_minpos_epu16( err2 );
  3020. tmp = _mm_cvtsi128_si64( minerr );
  3021. idx |= ( tmp >> 16 ) << 4*3;
  3022. err1 = _mm_sub_epi16( sr[12], recVal16 );
  3023. err2 = _mm_mullo_epi16( err1, err1 );
  3024. minerr = _mm_minpos_epu16( err2 );
  3025. tmp = _mm_cvtsi128_si64( minerr );
  3026. idx |= ( tmp >> 16 ) << 3*3;
  3027. err1 = _mm_sub_epi16( sr[13], recVal16 );
  3028. err2 = _mm_mullo_epi16( err1, err1 );
  3029. minerr = _mm_minpos_epu16( err2 );
  3030. tmp = _mm_cvtsi128_si64( minerr );
  3031. idx |= ( tmp >> 16 ) << 2*3;
  3032. err1 = _mm_sub_epi16( sr[14], recVal16 );
  3033. err2 = _mm_mullo_epi16( err1, err1 );
  3034. minerr = _mm_minpos_epu16( err2 );
  3035. tmp = _mm_cvtsi128_si64( minerr );
  3036. idx |= ( tmp >> 16 ) << 1*3;
  3037. err1 = _mm_sub_epi16( sr[15], recVal16 );
  3038. err2 = _mm_mullo_epi16( err1, err1 );
  3039. minerr = _mm_minpos_epu16( err2 );
  3040. tmp = _mm_cvtsi128_si64( minerr );
  3041. idx |= ( tmp >> 16 ) << 0*3;
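
    // The 64-bit EAC alpha block is assembled as: base codeword (srcMid) in bits 56..63,
    // the 4-bit multiplier in bits 52..55, the 4-bit table selector in bits 48..51, and
    // the sixteen 3-bit pixel indices in bits 0..47, then byte-swapped into the
    // big-endian order the format expects.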
    uint16_t rm[8];
    _mm_storeu_si128( (__m128i*)rm, mul );
    uint16_t sm = _mm_cvtsi128_si64( srcMid );

    uint64_t d = ( uint64_t( sm ) << 56 ) |
        ( uint64_t( rm[GetMulSel( sel )] ) << 52 ) |
        ( uint64_t( sel ) << 48 ) |
        idx;

    return _bswap64( d );
#elif defined __ARM_NEON
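    // The NEON path mirrors the SSE4.1 logic above: optional solid-block early out,
    // min/max to get the block range and midpoint, 16 candidate multiplier/table pairs,
    // per-pixel best-of-8 error accumulation, and index packing into the same 64-bit layout.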
    int16x8_t srcMidWide, multipliers;
    int srcMid;
    uint8x16_t srcAlphaBlock = vld1q_u8( src );
    {
        if( checkSolid )
        {
            uint8_t ref = src[0];
            uint8x16_t a0 = vdupq_n_u8( ref );
            uint8x16_t r = vceqq_u8( srcAlphaBlock, a0 );
            int64x2_t m = vreinterpretq_s64_u8( r );
            if( m[0] == -1 && m[1] == -1 )
                return ref;
        }

        // srcRange
#ifdef __aarch64__
        uint8_t min = vminvq_u8( srcAlphaBlock );
        uint8_t max = vmaxvq_u8( srcAlphaBlock );
        uint8_t srcRange = max - min;
        multipliers = vqaddq_s16( vshrq_n_s16( vqdmulhq_n_s16( g_alphaRange_NEON, srcRange ), 1 ), vdupq_n_s16( 1 ) );
        srcMid = min + srcRange / 2;
        srcMidWide = vdupq_n_s16( srcMid );
#else
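        // Without AArch64's vminvq/vmaxvq horizontal reductions, the 32-bit NEON fallback
        // folds the block to its min/max with repeated pairwise vpmin/vpmax steps.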
        uint8x8_t vmin = vpmin_u8( vget_low_u8( srcAlphaBlock ), vget_high_u8( srcAlphaBlock ) );
        vmin = vpmin_u8( vmin, vmin );
        vmin = vpmin_u8( vmin, vmin );
        vmin = vpmin_u8( vmin, vmin );
        uint8x8_t vmax = vpmax_u8( vget_low_u8( srcAlphaBlock ), vget_high_u8( srcAlphaBlock ) );
        vmax = vpmax_u8( vmax, vmax );
        vmax = vpmax_u8( vmax, vmax );
        vmax = vpmax_u8( vmax, vmax );

        int16x8_t srcRangeWide = vreinterpretq_s16_u16( vsubl_u8( vmax, vmin ) );
        multipliers = vqaddq_s16( vshrq_n_s16( vqdmulhq_s16( g_alphaRange_NEON, srcRangeWide ), 1 ), vdupq_n_s16( 1 ) );
        srcMidWide = vsraq_n_s16( vreinterpretq_s16_u16( vmovl_u8( vmin ) ), srcRangeWide, 1 );
        srcMid = vgetq_lane_s16( srcMidWide, 0 );
#endif
    }

    // calculate reconstructed values
#define EAC_APPLY_16X( m ) m( 0 ) m( 1 ) m( 2 ) m( 3 ) m( 4 ) m( 5 ) m( 6 ) m( 7 ) m( 8 ) m( 9 ) m( 10 ) m( 11 ) m( 12 ) m( 13 ) m( 14 ) m( 15 )
#define EAC_RECONSTRUCT_VALUE( n ) vqmovun_s16( vmlaq_s16( srcMidWide, g_alpha_NEON[n], WidenMultiplier_EAC_NEON<n>( multipliers ) ) ),
    uint8x8_t recVals[16] = { EAC_APPLY_16X( EAC_RECONSTRUCT_VALUE ) };
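
    // Each recVals[n] now holds the eight reconstructed alpha values for candidate table n
    // (srcMid + g_alpha_NEON[n] * multiplier), saturated down to u8.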
    // find selector
    int err = std::numeric_limits<int>::max();
    int sel = 0;
    for( int r = 0; r < 16; r++ )
    {
        uint8x8_t recVal = recVals[r];

        int rangeErr = 0;
#define EAC_ACCUMULATE_ERROR( n ) rangeErr += MinError_EAC_NEON( ErrorProbe_EAC_NEON<n>( recVal, srcAlphaBlock ) );
        EAC_APPLY_16X( EAC_ACCUMULATE_ERROR )

        if( rangeErr < err )
        {
            err = rangeErr;
            sel = r;
            if( err == 0 ) break;
        }
    }

    // combine results
    uint64_t d = ( uint64_t( srcMid ) << 56 ) |
        ( uint64_t( multipliers[GetMulSel( sel )] ) << 52 ) |
        ( uint64_t( sel ) << 48 );

    // generate indices
    uint8x8_t recVal = recVals[sel];
#define EAC_INSERT_INDEX(n) d |= MinErrorIndex_EAC_NEON<n>( recVal, srcAlphaBlock );
    EAC_APPLY_16X( EAC_INSERT_INDEX )

    return _bswap64( d );

#undef EAC_APPLY_16X
#undef EAC_INSERT_INDEX
#undef EAC_ACCUMULATE_ERROR
#undef EAC_RECONSTRUCT_VALUE
#else
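    // Scalar reference path: the multiplier is derived from the block range via the 16.16
    // fixed-point g_alphaRange table, and for every candidate table each pixel picks the
    // best of the eight modifier entries, with an early out once a table reaches zero error.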
    if( checkSolid )
    {
        bool solid = true;
        const uint8_t* ptr = src + 1;
        const uint8_t ref = *src;
        for( int i=1; i<16; i++ )
        {
            if( ref != *ptr++ )
            {
                solid = false;
                break;
            }
        }
        if( solid )
        {
            return ref;
        }
    }

    uint8_t min = src[0];
    uint8_t max = src[0];
    for( int i=1; i<16; i++ )
    {
        if( min > src[i] ) min = src[i];
        else if( max < src[i] ) max = src[i];
    }

    int srcRange = max - min;
    int srcMid = min + srcRange / 2;

    uint8_t buf[16][16];
    int err = std::numeric_limits<int>::max();
    int sel;
    int selmul;
    for( int r=0; r<16; r++ )
    {
        int mul = ( ( srcRange * g_alphaRange[r] ) >> 16 ) + 1;
        int rangeErr = 0;
        for( int i=0; i<16; i++ )
        {
            const auto srcVal = src[i];

            int idx = 0;
            const auto modVal = g_alpha[r][0] * mul;
            const auto recVal = clampu8( srcMid + modVal );
            int localErr = sq( srcVal - recVal );

            if( localErr != 0 )
            {
                for( int j=1; j<8; j++ )
                {
                    const auto modVal = g_alpha[r][j] * mul;
                    const auto recVal = clampu8( srcMid + modVal );
                    const auto errProbe = sq( srcVal - recVal );
                    if( errProbe < localErr )
                    {
                        localErr = errProbe;
                        idx = j;
                    }
                }
            }

            buf[r][i] = idx;
            rangeErr += localErr;
        }
        if( rangeErr < err )
        {
            err = rangeErr;
            sel = r;
            selmul = mul;
            if( err == 0 ) break;
        }
    }

    uint64_t d = ( uint64_t( srcMid ) << 56 ) |
        ( uint64_t( selmul ) << 52 ) |
        ( uint64_t( sel ) << 48 );
    int offset = 45;
    auto ptr = buf[sel];
    for( int i=0; i<16; i++ )
    {
        d |= uint64_t( *ptr++ ) << offset;
        offset -= 3;
    }
    return _bswap64( d );
#endif
}

void CompressEtc1Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{
    int w = 0;
    uint32_t buf[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );

        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );

        _mm_store_si128( (__m128i*)(buf + 0), _mm_castps_si128( px0 ) );
        _mm_store_si128( (__m128i*)(buf + 4), _mm_castps_si128( px1 ) );
        _mm_store_si128( (__m128i*)(buf + 8), _mm_castps_si128( px2 ) );
        _mm_store_si128( (__m128i*)(buf + 12), _mm_castps_si128( px3 ) );

        src += 4;
#else
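        // Portable fallback: gather the 4x4 tile column by column (the same column-major
        // order the SSE path produces via the transpose), stepping down a column with
        // src += width and hopping to the top of the next column with src -= width * 3 - 1.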
        auto ptr = buf;
        for( int x=0; x<4; x++ )
        {
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src -= width * 3 - 1;
        }
#endif
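        // After the last block in a row of blocks, skip the remaining three pixel rows of
        // the tile so src lands on the first pixel of the next block row.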
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }

        *dst++ = ProcessRGB( (uint8_t*)buf );
    }
    while( --blocks );
}
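
// Illustrative sketch only, not part of the original file: the helper name below is
// hypothetical and merely shows how a caller might drive CompressEtc1Rgb over a whole
// image whose dimensions are assumed to be multiples of 4. One 64-bit ETC1 block is
// produced per 4x4 tile, and width is passed in pixels.
static inline void ExampleCompressEtc1Image( const uint32_t* pixels, uint64_t* out, uint32_t imgWidth, uint32_t imgHeight )
{
    const uint32_t blocks = ( imgWidth / 4 ) * ( imgHeight / 4 );
    CompressEtc1Rgb( pixels, out, blocks, imgWidth );
}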

void CompressEtc1RgbDither( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{
    int w = 0;
    uint32_t buf[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );

        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
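
        // With AVX2 the transposed registers are dithered and written out directly by
        // DitherAvx2; otherwise they are stored first and the block is dithered in place.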
# ifdef __AVX2__
        DitherAvx2( (uint8_t*)buf, _mm_castps_si128( px0 ), _mm_castps_si128( px1 ), _mm_castps_si128( px2 ), _mm_castps_si128( px3 ) );
# else
        _mm_store_si128( (__m128i*)(buf + 0), _mm_castps_si128( px0 ) );
        _mm_store_si128( (__m128i*)(buf + 4), _mm_castps_si128( px1 ) );
        _mm_store_si128( (__m128i*)(buf + 8), _mm_castps_si128( px2 ) );
        _mm_store_si128( (__m128i*)(buf + 12), _mm_castps_si128( px3 ) );

        Dither( (uint8_t*)buf );
# endif
        src += 4;
#else
        auto ptr = buf;
        for( int x=0; x<4; x++ )
        {
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }

        *dst++ = ProcessRGB( (uint8_t*)buf );
    }
    while( --blocks );
}

void CompressEtc2Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width, bool useHeuristics )
{
    int w = 0;
    uint32_t buf[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );

        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );

        _mm_store_si128( (__m128i*)(buf + 0), _mm_castps_si128( px0 ) );
        _mm_store_si128( (__m128i*)(buf + 4), _mm_castps_si128( px1 ) );
        _mm_store_si128( (__m128i*)(buf + 8), _mm_castps_si128( px2 ) );
        _mm_store_si128( (__m128i*)(buf + 12), _mm_castps_si128( px3 ) );

        src += 4;
#else
        auto ptr = buf;
        for( int x=0; x<4; x++ )
        {
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }

        *dst++ = ProcessRGB_ETC2( (uint8_t*)buf, useHeuristics );
    }
    while( --blocks );
}

void CompressEtc2Rgba( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width, bool useHeuristics )
{
    int w = 0;
    uint32_t rgba[4*4];
    uint8_t alpha[4*4];
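
    // Each 4x4 tile of an ETC2+EAC RGBA texture is 128 bits: the EAC alpha block followed
    // by the ETC2 color block, so every loop iteration below writes two qwords to dst.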
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );

        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );

        __m128i c0 = _mm_castps_si128( px0 );
        __m128i c1 = _mm_castps_si128( px1 );
        __m128i c2 = _mm_castps_si128( px2 );
        __m128i c3 = _mm_castps_si128( px3 );

        _mm_store_si128( (__m128i*)(rgba + 0), c0 );
        _mm_store_si128( (__m128i*)(rgba + 4), c1 );
        _mm_store_si128( (__m128i*)(rgba + 8), c2 );
        _mm_store_si128( (__m128i*)(rgba + 12), c3 );
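
        // The shuffle mask 0x0f0b0703 picks the byte at offset 3 (alpha) of each of the
        // four pixels in a register; the rotated copies of the mask drop the other rows'
        // alpha bytes into the next 4-byte groups, so the three ORs leave all sixteen
        // alpha values packed in s2.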
        __m128i mask = _mm_setr_epi32( 0x0f0b0703, -1, -1, -1 );

        __m128i a0 = _mm_shuffle_epi8( c0, mask );
        __m128i a1 = _mm_shuffle_epi8( c1, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 3, 0, 3 ) ) );
        __m128i a2 = _mm_shuffle_epi8( c2, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 0, 3, 3 ) ) );
        __m128i a3 = _mm_shuffle_epi8( c3, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 0, 3, 3, 3 ) ) );

        __m128i s0 = _mm_or_si128( a0, a1 );
        __m128i s1 = _mm_or_si128( a2, a3 );
        __m128i s2 = _mm_or_si128( s0, s1 );

        _mm_store_si128( (__m128i*)alpha, s2 );

        src += 4;
#else
        auto ptr = rgba;
        auto ptr8 = alpha;
        for( int x=0; x<4; x++ )
        {
            auto v = *src;
            *ptr++ = v;
            *ptr8++ = v >> 24;
            src += width;
            v = *src;
            *ptr++ = v;
            *ptr8++ = v >> 24;
            src += width;
            v = *src;
            *ptr++ = v;
            *ptr8++ = v >> 24;
            src += width;
            v = *src;
            *ptr++ = v;
            *ptr8++ = v >> 24;
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }

        *dst++ = ProcessAlpha_ETC2<true>( alpha );
        *dst++ = ProcessRGB_ETC2( (uint8_t*)rgba, useHeuristics );
    }
    while( --blocks );
}

void CompressEacR( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{
    int w = 0;
    uint8_t r[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );

        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );

        __m128i c0 = _mm_castps_si128( px0 );
        __m128i c1 = _mm_castps_si128( px1 );
        __m128i c2 = _mm_castps_si128( px2 );
        __m128i c3 = _mm_castps_si128( px3 );
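
        // Same gather trick as the RGBA path, but with mask 0x0e0a0602: the byte at offset
        // 2 of every pixel is collected into r[] and encoded as a single-channel EAC block.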
        __m128i mask = _mm_setr_epi32( 0x0e0a0602, -1, -1, -1 );

        __m128i a0 = _mm_shuffle_epi8( c0, mask );
        __m128i a1 = _mm_shuffle_epi8( c1, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 3, 0, 3 ) ) );
        __m128i a2 = _mm_shuffle_epi8( c2, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 0, 3, 3 ) ) );
        __m128i a3 = _mm_shuffle_epi8( c3, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 0, 3, 3, 3 ) ) );

        __m128i s0 = _mm_or_si128( a0, a1 );
        __m128i s1 = _mm_or_si128( a2, a3 );
        __m128i s2 = _mm_or_si128( s0, s1 );

        _mm_store_si128( (__m128i*)r, s2 );

        src += 4;
#else
        auto ptr8 = r;
        for( int x=0; x<4; x++ )
        {
            auto v = *src;
            *ptr8++ = (v & 0xff0000) >> 16;
            src += width;
            v = *src;
            *ptr8++ = (v & 0xff0000) >> 16;
            src += width;
            v = *src;
            *ptr8++ = (v & 0xff0000) >> 16;
            src += width;
            v = *src;
            *ptr8++ = (v & 0xff0000) >> 16;
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }

        *dst++ = ProcessAlpha_ETC2<false>( r );
    }
    while( --blocks );
}

void CompressEacRg( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{
    int w = 0;
    uint8_t rg[4*4*2];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );

        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );

        __m128i c0 = _mm_castps_si128( px0 );
        __m128i c1 = _mm_castps_si128( px1 );
        __m128i c2 = _mm_castps_si128( px2 );
        __m128i c3 = _mm_castps_si128( px3 );

        __m128i mask = _mm_setr_epi32( 0x0e0a0602, -1, -1, -1 );

        __m128i r0 = _mm_shuffle_epi8( c0, mask );
        __m128i r1 = _mm_shuffle_epi8( c1, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 3, 0, 3 ) ) );
        __m128i r2 = _mm_shuffle_epi8( c2, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 0, 3, 3 ) ) );
        __m128i r3 = _mm_shuffle_epi8( c3, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 0, 3, 3, 3 ) ) );

        __m128i s0 = _mm_or_si128( r0, r1 );
        __m128i s1 = _mm_or_si128( r2, r3 );
        __m128i s2 = _mm_or_si128( s0, s1 );

        _mm_store_si128( (__m128i*)rg, s2 );
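
        // Second pass with mask 0x0d090501: the byte at offset 1 of every pixel lands in
        // rg[16..31], so the two channels are encoded as two independent EAC blocks below.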
        mask = _mm_setr_epi32( 0x0d090501, -1, -1, -1 );

        r0 = _mm_shuffle_epi8( c0, mask );
        r1 = _mm_shuffle_epi8( c1, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 3, 0, 3 ) ) );
        r2 = _mm_shuffle_epi8( c2, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 0, 3, 3 ) ) );
        r3 = _mm_shuffle_epi8( c3, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 0, 3, 3, 3 ) ) );

        s0 = _mm_or_si128( r0, r1 );
        s1 = _mm_or_si128( r2, r3 );
        s2 = _mm_or_si128( s0, s1 );

        _mm_store_si128( (__m128i*)&rg[16], s2 );

        src += 4;
#else
        auto ptrr = rg;
        auto ptrg = ptrr + 16;
        for( int x=0; x<4; x++ )
        {
            auto v = *src;
            *ptrr++ = (v & 0xff0000) >> 16;
            *ptrg++ = (v & 0xff00) >> 8;
            src += width;
            v = *src;
            *ptrr++ = (v & 0xff0000) >> 16;
            *ptrg++ = (v & 0xff00) >> 8;
            src += width;
            v = *src;
            *ptrr++ = (v & 0xff0000) >> 16;
            *ptrg++ = (v & 0xff00) >> 8;
            src += width;
            v = *src;
            *ptrr++ = (v & 0xff0000) >> 16;
            *ptrg++ = (v & 0xff00) >> 8;
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }

        *dst++ = ProcessAlpha_ETC2<false>( rg );
        *dst++ = ProcessAlpha_ETC2<false>( &rg[16] );
    }
    while( --blocks );
}