  1. /*
  2. * jidctint.c
  3. *
  4. * This file was part of the Independent JPEG Group's software.
  5. * Copyright (C) 1991-1998, Thomas G. Lane.
  6. * Modification developed 2002-2009 by Guido Vollbeding.
  7. * libjpeg-turbo Modifications:
  8. * Copyright (C) 2015, D. R. Commander
  9. * For conditions of distribution and use, see the accompanying README file.
  10. *
  11. * This file contains a slow-but-accurate integer implementation of the
  12. * inverse DCT (Discrete Cosine Transform). In the IJG code, this routine
  13. * must also perform dequantization of the input coefficients.
  14. *
  15. * A 2-D IDCT can be done by 1-D IDCT on each column followed by 1-D IDCT
  16. * on each row (or vice versa, but it's more convenient to emit a row at
  17. * a time). Direct algorithms are also available, but they are much more
  18. * complex and seem not to be any faster when reduced to code.
  19. *
  20. * This implementation is based on an algorithm described in
  21. * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT
  22. * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics,
  23. * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991.
  24. * The primary algorithm described there uses 11 multiplies and 29 adds.
  25. * We use their alternate method with 12 multiplies and 32 adds.
  26. * The advantage of this method is that no data path contains more than one
  27. * multiplication; this allows a very simple and accurate implementation in
  28. * scaled fixed-point arithmetic, with a minimal number of shifts.
  29. *
  30. * We also provide IDCT routines with various output sample block sizes for
  31. * direct resolution reduction or enlargement without additional resampling:
  32. * NxN (N=1...16) pixels for one 8x8 input DCT block.
  33. *
  34. * For N<8 we simply take the corresponding low-frequency coefficients of
  35. * the 8x8 input DCT block and apply an NxN point IDCT on the sub-block
  36. * to yield the downscaled outputs.
  37. * This can be seen as direct low-pass downsampling from the DCT domain
  38. * point of view rather than the usual spatial domain point of view,
  39. * yielding significant computational savings and results at least
  40. * as good as common bilinear (averaging) spatial downsampling.
  41. *
  42. * For N>8 we apply a partial NxN IDCT on the 8 input coefficients as
  43. * lower frequencies and higher frequencies assumed to be zero.
  44. * It turns out that the computational effort is similar to the 8x8 IDCT
  45. * regarding the output size.
  46. * Furthermore, the scaling and descaling is the same for all IDCT sizes.
  47. *
  48. * CAUTION: We rely on the FIX() macro except for the N=1,2,4,8 cases
  49. * since there would be too many additional constants to pre-calculate.
  50. */
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jdct.h"               /* Private declarations for DCT subsystem */

#ifdef DCT_ISLOW_SUPPORTED

/*
 * This module is specialized to the case DCTSIZE = 8.
 */

#if DCTSIZE != 8
  Sorry, this code only copes with 8x8 DCT blocks. /* deliberate syntax err */
#endif

/*
 * The poop on this scaling stuff is as follows:
 *
 * Each 1-D IDCT step produces outputs which are a factor of sqrt(N)
 * larger than the true IDCT outputs.  The final outputs are therefore
 * a factor of N larger than desired; since N=8 this can be cured by
 * a simple right shift at the end of the algorithm.  The advantage of
 * this arrangement is that we save two multiplications per 1-D IDCT,
 * because the y0 and y4 inputs need not be divided by sqrt(N).
 *
 * We have to do addition and subtraction of the integer inputs, which
 * is no problem, and multiplication by fractional constants, which is
 * a problem to do in integer arithmetic.  We multiply all the constants
 * by CONST_SCALE and convert them to integer constants (thus retaining
 * CONST_BITS bits of precision in the constants).  After doing a
 * multiplication we have to divide the product by CONST_SCALE, with proper
 * rounding, to produce the correct output.  This division can be done
 * cheaply as a right shift of CONST_BITS bits.  We postpone shifting
 * as long as possible so that partial sums can be added together with
 * full fractional precision.
 *
 * The outputs of the first pass are scaled up by PASS1_BITS bits so that
 * they are represented to better-than-integral precision.  These outputs
 * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
 * with the recommended scaling.  (To scale up 12-bit sample data further, an
 * intermediate INT32 array would be needed.)
 *
 * To avoid overflow of the 32-bit intermediate results in pass 2, we must
 * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26.  Error analysis
 * shows that the values given below are the most effective.
 */

#if BITS_IN_JSAMPLE == 8
#define CONST_BITS  13
#define PASS1_BITS  2
#else
#define CONST_BITS  13
#define PASS1_BITS  1           /* lose a little precision to avoid overflow */
#endif

/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
 * causing a lot of useless floating-point operations at run time.
 * To get around this we use the following pre-calculated constants.
 * If you change CONST_BITS you may want to add appropriate values.
 * (With a reasonable C compiler, you can just rely on the FIX() macro...)
 *
 * Each value below is round(constant * 2^13), i.e. the constant in
 * 13-bit fixed point, matching CONST_BITS == 13 above.
 */

#if CONST_BITS == 13
#define FIX_0_298631336  ((INT32) 2446)         /* FIX(0.298631336) */
#define FIX_0_390180644  ((INT32) 3196)         /* FIX(0.390180644) */
#define FIX_0_541196100  ((INT32) 4433)         /* FIX(0.541196100) */
#define FIX_0_765366865  ((INT32) 6270)         /* FIX(0.765366865) */
#define FIX_0_899976223  ((INT32) 7373)         /* FIX(0.899976223) */
#define FIX_1_175875602  ((INT32) 9633)         /* FIX(1.175875602) */
#define FIX_1_501321110  ((INT32) 12299)        /* FIX(1.501321110) */
#define FIX_1_847759065  ((INT32) 15137)        /* FIX(1.847759065) */
#define FIX_1_961570560  ((INT32) 16069)        /* FIX(1.961570560) */
#define FIX_2_053119869  ((INT32) 16819)        /* FIX(2.053119869) */
#define FIX_2_562915447  ((INT32) 20995)        /* FIX(2.562915447) */
#define FIX_3_072711026  ((INT32) 25172)        /* FIX(3.072711026) */
#else
#define FIX_0_298631336  FIX(0.298631336)
#define FIX_0_390180644  FIX(0.390180644)
#define FIX_0_541196100  FIX(0.541196100)
#define FIX_0_765366865  FIX(0.765366865)
#define FIX_0_899976223  FIX(0.899976223)
#define FIX_1_175875602  FIX(1.175875602)
#define FIX_1_501321110  FIX(1.501321110)
#define FIX_1_847759065  FIX(1.847759065)
#define FIX_1_961570560  FIX(1.961570560)
#define FIX_2_053119869  FIX(2.053119869)
#define FIX_2_562915447  FIX(2.562915447)
#define FIX_3_072711026  FIX(3.072711026)
#endif

/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result.
 * For 8-bit samples with the recommended scaling, all the variable
 * and constant values involved are no more than 16 bits wide, so a
 * 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
 * For 12-bit samples, a full 32-bit multiplication will be needed.
 */

#if BITS_IN_JSAMPLE == 8
#define MULTIPLY(var,const)  MULTIPLY16C16(var,const)
#else
#define MULTIPLY(var,const)  ((var) * (const))
#endif

/* Dequantize a coefficient by multiplying it by the multiplier-table
 * entry; produce an int result.  In this module, both inputs and result
 * are 16 bits or less, so either int or short multiply will work.
 */

#define DEQUANTIZE(coef,quantval)  (((ISLOW_MULT_TYPE) (coef)) * (quantval))
/*
 * Perform dequantization and inverse DCT on one block of coefficients.
 *
 * cinfo      - decompression object (used only to fetch the range-limit table)
 * compptr    - component whose dct_table holds the dequantization multipliers
 * coef_block - 8x8 block of quantized DCT coefficients (column-major access
 *              in pass 1 via stride DCTSIZE)
 * output_buf/output_col - destination sample rows; row ctr is written at
 *              output_buf[ctr] + output_col
 *
 * Two 1-D passes (columns, then rows) of the Loeffler-Ligtenberg-Moschytz
 * algorithm in scaled fixed-point arithmetic; intermediate results are kept
 * in the int workspace[] between passes.
 */

GLOBAL(void)
jpeg_idct_islow (j_decompress_ptr cinfo, jpeg_component_info * compptr,
                 JCOEFPTR coef_block,
                 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3;
  INT32 tmp10, tmp11, tmp12, tmp13;
  INT32 z1, z2, z3, z4, z5;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[DCTSIZE2];      /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */
  /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
  /* furthermore, we scale the results by 2**PASS1_BITS. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = DCTSIZE; ctr > 0; ctr--) {
    /* Due to quantization, we will usually find that many of the input
     * coefficients are zero, especially the AC terms.  We can exploit this
     * by short-circuiting the IDCT calculation for any column in which all
     * the AC terms are zero.  In that case each output is equal to the
     * DC coefficient (with scale factor as needed).
     * With typical images and quantization tables, half or more of the
     * column DCT calculations can be simplified this way.
     */

    if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
        inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 &&
        inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 &&
        inptr[DCTSIZE*7] == 0) {
      /* AC terms all zero: every output in the column is the scaled DC. */
      int dcval = LEFT_SHIFT(DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]),
                             PASS1_BITS);

      wsptr[DCTSIZE*0] = dcval;
      wsptr[DCTSIZE*1] = dcval;
      wsptr[DCTSIZE*2] = dcval;
      wsptr[DCTSIZE*3] = dcval;
      wsptr[DCTSIZE*4] = dcval;
      wsptr[DCTSIZE*5] = dcval;
      wsptr[DCTSIZE*6] = dcval;
      wsptr[DCTSIZE*7] = dcval;

      inptr++;                  /* advance pointers to next column */
      quantptr++;
      wsptr++;
      continue;
    }

    /* Even part: reverse the even part of the forward DCT. */
    /* The rotator is sqrt(2)*c(-6). */

    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    /* Shared-term rotation: one multiply feeds both tmp2 and tmp3. */
    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
    tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
    tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);

    z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);

    /* DC and y4 need no multiply; just align them to CONST_BITS scale. */
    tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS);
    tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS);

    tmp10 = tmp0 + tmp3;
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);

    z1 = tmp0 + tmp3;
    z2 = tmp1 + tmp2;
    z3 = tmp0 + tmp2;
    z4 = tmp1 + tmp3;
    z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */

    tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
    z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
    z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
    z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
    z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */

    z3 += z5;
    z4 += z5;

    tmp0 += z1 + z3;
    tmp1 += z2 + z4;
    tmp2 += z2 + z3;
    tmp3 += z1 + z4;

    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3.
     * Drop CONST_BITS-PASS1_BITS so workspace values stay at PASS1_BITS
     * extra precision (fits in an int / 16-bit word for 8-bit samples).
     */

    wsptr[DCTSIZE*0] = (int) DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*7] = (int) DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*1] = (int) DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*6] = (int) DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*5] = (int) DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*3] = (int) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*4] = (int) DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS);

    inptr++;                    /* advance pointers to next column */
    quantptr++;
    wsptr++;
  }

  /* Pass 2: process rows from work array, store into output array. */
  /* Note that we must descale the results by a factor of 8 == 2**3, */
  /* and also undo the PASS1_BITS scaling. */

  wsptr = workspace;
  for (ctr = 0; ctr < DCTSIZE; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Rows of zeroes can be exploited in the same way as we did with columns.
     * However, the column calculation has created many nonzero AC terms, so
     * the simplification applies less often (typically 5% to 10% of the time).
     * On machines with very fast multiplication, it's possible that the
     * test takes more time than it's worth.  In that case this section
     * may be commented out.
     */

#ifndef NO_ZERO_ROW_TEST
    if (wsptr[1] == 0 && wsptr[2] == 0 && wsptr[3] == 0 && wsptr[4] == 0 &&
        wsptr[5] == 0 && wsptr[6] == 0 && wsptr[7] == 0) {
      /* AC terms all zero */
      JSAMPLE dcval = range_limit[(int) DESCALE((INT32) wsptr[0], PASS1_BITS+3)
                                  & RANGE_MASK];

      outptr[0] = dcval;
      outptr[1] = dcval;
      outptr[2] = dcval;
      outptr[3] = dcval;
      outptr[4] = dcval;
      outptr[5] = dcval;
      outptr[6] = dcval;
      outptr[7] = dcval;

      wsptr += DCTSIZE;         /* advance pointer to next row */
      continue;
    }
#endif

    /* Even part: reverse the even part of the forward DCT. */
    /* The rotator is sqrt(2)*c(-6). */

    z2 = (INT32) wsptr[2];
    z3 = (INT32) wsptr[6];

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
    tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
    tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);

    tmp0 = LEFT_SHIFT((INT32) wsptr[0] + (INT32) wsptr[4], CONST_BITS);
    tmp1 = LEFT_SHIFT((INT32) wsptr[0] - (INT32) wsptr[4], CONST_BITS);

    tmp10 = tmp0 + tmp3;
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */

    tmp0 = (INT32) wsptr[7];
    tmp1 = (INT32) wsptr[5];
    tmp2 = (INT32) wsptr[3];
    tmp3 = (INT32) wsptr[1];

    z1 = tmp0 + tmp3;
    z2 = tmp1 + tmp2;
    z3 = tmp0 + tmp2;
    z4 = tmp1 + tmp3;
    z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */

    tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
    z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
    z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
    z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
    z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */

    z3 += z5;
    z4 += z5;

    tmp0 += z1 + z3;
    tmp1 += z2 + z4;
    tmp2 += z2 + z3;
    tmp3 += z1 + z4;

    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3.
     * Descale by CONST_BITS+PASS1_BITS+3 (undo constant scale, pass-1 scale,
     * and the overall factor of 8), then clamp via range_limit[].
     */

    outptr[0] = range_limit[(int) DESCALE(tmp10 + tmp3,
                                          CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[7] = range_limit[(int) DESCALE(tmp10 - tmp3,
                                          CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) DESCALE(tmp11 + tmp2,
                                          CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[6] = range_limit[(int) DESCALE(tmp11 - tmp2,
                                          CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) DESCALE(tmp12 + tmp1,
                                          CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) DESCALE(tmp12 - tmp1,
                                          CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) DESCALE(tmp13 + tmp0,
                                          CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) DESCALE(tmp13 - tmp0,
                                          CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];

    wsptr += DCTSIZE;           /* advance pointer to next row */
  }
}
  355. #ifdef IDCT_SCALING_SUPPORTED
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 7x7 output block.
 *
 * Optimized algorithm with 12 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/14).
 *
 * Only the low-frequency 7x7 corner of the 8x8 coefficient block is used
 * (direct DCT-domain downscaling); parameters are as for jpeg_idct_islow.
 */

GLOBAL(void)
jpeg_idct_7x7 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[7*7];           /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 7; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp13 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp13 = LEFT_SHIFT(tmp13, CONST_BITS);
    /* Add fudge factor here for final descale (pre-biases the rounding
     * done by the RIGHT_SHIFT at the end of this pass). */
    tmp13 += ONE << (CONST_BITS-PASS1_BITS-1);

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734));       /* c4 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123));       /* c6 */
    tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
    tmp0 = z1 + z3;
    z2 -= tmp0;                 /* z2 is reused below scaled by c0 */
    tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13;   /* c2 */
    tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536));    /* c2-c4-c6 */
    tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249));    /* c2+c4+c6 */
    tmp13 += MULTIPLY(z2, FIX(1.414213562));           /* c0 */

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);

    tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347));        /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339));        /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(z2 + z3, - FIX(1.378756276));      /* -c1 */
    tmp1 += tmp2;
    z2 = MULTIPLY(z1 + z3, FIX(0.613604268));          /* c5 */
    tmp0 += z2;
    tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693));       /* c3+c1-c5 */

    /* Final output stage: fudge factor above makes plain RIGHT_SHIFT
     * equivalent to a rounding DESCALE. */

    wsptr[7*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[7*6] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[7*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[7*5] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[7*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[7*4] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
    wsptr[7*3] = (int) RIGHT_SHIFT(tmp13, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 7 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 7; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp13 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp13 = LEFT_SHIFT(tmp13, CONST_BITS);

    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[4];
    z3 = (INT32) wsptr[6];

    tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734));       /* c4 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123));       /* c6 */
    tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
    tmp0 = z1 + z3;
    z2 -= tmp0;
    tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13;   /* c2 */
    tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536));    /* c2-c4-c6 */
    tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249));    /* c2+c4+c6 */
    tmp13 += MULTIPLY(z2, FIX(1.414213562));           /* c0 */

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];

    tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347));        /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339));        /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(z2 + z3, - FIX(1.378756276));      /* -c1 */
    tmp1 += tmp2;
    z2 = MULTIPLY(z1 + z3, FIX(0.613604268));          /* c5 */
    tmp0 += z2;
    tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693));       /* c3+c1-c5 */

    /* Final output stage: descale, then clamp via range_limit[]. */

    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];

    wsptr += 7;                 /* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 6x6 output block.
 *
 * Optimized algorithm with 3 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/12).
 *
 * coef_block holds the 8x8 quantized coefficients; only the low-frequency
 * 6x6 subset (rows/cols 0..5) is read.  Dequantization multipliers come
 * from compptr->dct_table.  Output samples are written to rows
 * output_buf[0..5], starting at column output_col, after being descaled
 * and looked up in the range_limit[] table.
 */

GLOBAL(void)
jpeg_idct_6x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[6*6];   /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Column results are kept scaled up by PASS1_BITS for pass-2 accuracy.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part: inputs 0, 2, 4 */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    tmp10 = MULTIPLY(tmp2, FIX(0.707106781));   /* c4 */
    tmp1 = tmp0 + tmp10;
    /* tmp11 is fully descaled here because its odd-part partner (tmp1
     * below) is produced at PASS1_BITS scale rather than CONST_BITS. */
    tmp11 = RIGHT_SHIFT(tmp0 - tmp10 - tmp10, CONST_BITS-PASS1_BITS);
    tmp10 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp0 = MULTIPLY(tmp10, FIX(1.224744871));   /* c2 */
    tmp10 = tmp1 + tmp0;
    tmp12 = tmp1 - tmp0;
    /* Odd part: inputs 1, 3, 5 */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp0 = tmp1 + LEFT_SHIFT(z1 + z2, CONST_BITS);
    tmp2 = tmp1 + LEFT_SHIFT(z3 - z2, CONST_BITS);
    tmp1 = LEFT_SHIFT(z1 - z2 - z3, PASS1_BITS);
    /* Final output stage: butterfly sums/differences into the workspace */
    wsptr[6*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[6*5] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[6*1] = (int) (tmp11 + tmp1);
    wsptr[6*4] = (int) (tmp11 - tmp1);
    wsptr[6*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[6*3] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 6 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    tmp2 = (INT32) wsptr[4];
    tmp10 = MULTIPLY(tmp2, FIX(0.707106781));   /* c4 */
    tmp1 = tmp0 + tmp10;
    tmp11 = tmp0 - tmp10 - tmp10;
    tmp10 = (INT32) wsptr[2];
    tmp0 = MULTIPLY(tmp10, FIX(1.224744871));   /* c2 */
    tmp10 = tmp1 + tmp0;
    tmp12 = tmp1 - tmp0;
    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp0 = tmp1 + LEFT_SHIFT(z1 + z2, CONST_BITS);
    tmp2 = tmp1 + LEFT_SHIFT(z3 - z2, CONST_BITS);
    tmp1 = LEFT_SHIFT(z1 - z2 - z3, CONST_BITS);
    /* Final output stage: descale, mask, and look up in range_limit[] */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += 6;         /* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 5x5 output block.
 *
 * Optimized algorithm with 5 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/10).
 *
 * Only the 5x5 low-frequency subset of coef_block is read.  Output goes
 * to rows output_buf[0..4] starting at output_col, descaled and clamped
 * through the range_limit[] table.
 */

GLOBAL(void)
jpeg_idct_5x5 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp10, tmp11, tmp12;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[5*5];   /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part: inputs 0, 2, 4 */
    tmp12 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp12 = LEFT_SHIFT(tmp12, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp12 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */
    z3 = tmp12 + z2;
    tmp10 = z3 + z1;
    tmp11 = z3 - z1;
    tmp12 -= LEFT_SHIFT(z2, 2);
    /* Odd part: inputs 1, 3 */
    z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));   /* c3 */
    tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */
    tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */
    /* Final output stage */
    wsptr[5*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[5*4] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[5*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[5*3] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[5*2] = (int) RIGHT_SHIFT(tmp12, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 5 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp12 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp12 = LEFT_SHIFT(tmp12, CONST_BITS);
    tmp0 = (INT32) wsptr[2];
    tmp1 = (INT32) wsptr[4];
    z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */
    z3 = tmp12 + z2;
    tmp10 = z3 + z1;
    tmp11 = z3 - z1;
    tmp12 -= LEFT_SHIFT(z2, 2);
    /* Odd part */
    z2 = (INT32) wsptr[1];
    z3 = (INT32) wsptr[3];
    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));   /* c3 */
    tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */
    tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */
    /* Final output stage: descale, mask, and look up in range_limit[] */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += 5;         /* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 3x3 output block.
 *
 * Optimized algorithm with 2 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/6).
 *
 * Only the 3x3 low-frequency subset of coef_block is read.  Output goes
 * to rows output_buf[0..2] starting at output_col, descaled and clamped
 * through the range_limit[] table.
 */

GLOBAL(void)
jpeg_idct_3x3 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp2, tmp10, tmp12;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[3*3];   /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 3; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part: inputs 0, 2 */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */
    tmp10 = tmp0 + tmp12;
    tmp2 = tmp0 - tmp12 - tmp12;
    /* Odd part: input 1 */
    tmp12 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */
    /* Final output stage */
    wsptr[3*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[3*2] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[3*1] = (int) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 3 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 3; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    tmp2 = (INT32) wsptr[2];
    tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */
    tmp10 = tmp0 + tmp12;
    tmp2 = tmp0 - tmp12 - tmp12;
    /* Odd part */
    tmp12 = (INT32) wsptr[1];
    tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */
    /* Final output stage: descale, mask, and look up in range_limit[] */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += 3;         /* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 9x9 output block.
 *
 * Optimized algorithm with 10 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/18).
 *
 * All 8 input columns of the 8x8 coef_block are transformed in pass 1;
 * pass 2 then expands the 8-wide workspace rows to 9 output samples per
 * row.  Output goes to rows output_buf[0..8] starting at output_col.
 */

GLOBAL(void)
jpeg_idct_9x9 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*9];   /* buffers data between passes: 8 columns x 9 rows */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part: inputs 0, 2, 4, 6 */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    tmp3 = MULTIPLY(z3, FIX(0.707106781));      /* c6 */
    tmp1 = tmp0 + tmp3;
    tmp2 = tmp0 - tmp3 - tmp3;
    tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */
    tmp11 = tmp2 + tmp0;
    tmp14 = tmp2 - tmp0 - tmp0;
    tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */
    tmp2 = MULTIPLY(z1, FIX(1.083350441));      /* c4 */
    tmp3 = MULTIPLY(z2, FIX(0.245575608));      /* c8 */
    tmp10 = tmp1 + tmp0 - tmp3;
    tmp12 = tmp1 - tmp0 + tmp2;
    tmp13 = tmp1 - tmp2 + tmp3;
    /* Odd part: inputs 1, 3, 5, 7 */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    z2 = MULTIPLY(z2, - FIX(1.224744871));      /* -c3 */
    tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */
    tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */
    tmp0 = tmp2 + tmp3 - z2;
    tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */
    tmp2 += z2 - tmp1;
    tmp3 += z2 + tmp1;
    tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */
    /* Final output stage: 9 workspace rows, stride 8 */
    wsptr[8*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[8*8] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[8*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[8*7] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[8*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[8*6] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
    wsptr[8*3] = (int) RIGHT_SHIFT(tmp13 + tmp3, CONST_BITS-PASS1_BITS);
    wsptr[8*5] = (int) RIGHT_SHIFT(tmp13 - tmp3, CONST_BITS-PASS1_BITS);
    wsptr[8*4] = (int) RIGHT_SHIFT(tmp14, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 9 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 9; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[4];
    z3 = (INT32) wsptr[6];
    tmp3 = MULTIPLY(z3, FIX(0.707106781));      /* c6 */
    tmp1 = tmp0 + tmp3;
    tmp2 = tmp0 - tmp3 - tmp3;
    tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */
    tmp11 = tmp2 + tmp0;
    tmp14 = tmp2 - tmp0 - tmp0;
    tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */
    tmp2 = MULTIPLY(z1, FIX(1.083350441));      /* c4 */
    tmp3 = MULTIPLY(z2, FIX(0.245575608));      /* c8 */
    tmp10 = tmp1 + tmp0 - tmp3;
    tmp12 = tmp1 - tmp0 + tmp2;
    tmp13 = tmp1 - tmp2 + tmp3;
    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];
    z2 = MULTIPLY(z2, - FIX(1.224744871));      /* -c3 */
    tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */
    tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */
    tmp0 = tmp2 + tmp3 - z2;
    tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */
    tmp2 += z2 - tmp1;
    tmp3 += z2 + tmp1;
    tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */
    /* Final output stage: descale, mask, and look up in range_limit[] */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp3,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp3,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp14,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += 8;         /* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 10x10 output block.
 *
 * Optimized algorithm with 12 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/20).
 *
 * All 8 input columns of the 8x8 coef_block are transformed in pass 1;
 * pass 2 expands the 8-wide workspace rows to 10 output samples per row.
 * Output goes to rows output_buf[0..9] starting at output_col.
 */

GLOBAL(void)
jpeg_idct_10x10 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
                 JCOEFPTR coef_block,
                 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24;
  INT32 z1, z2, z3, z4, z5;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*10];  /* buffers data between passes: 8 columns x 10 rows */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part: inputs 0, 2, 4, 6 */
    z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 = LEFT_SHIFT(z3, CONST_BITS);
    /* Add fudge factor here for final descale. */
    z3 += ONE << (CONST_BITS-PASS1_BITS-1);
    z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z1 = MULTIPLY(z4, FIX(1.144122806));        /* c4 */
    z2 = MULTIPLY(z4, FIX(0.437016024));        /* c8 */
    tmp10 = z3 + z1;
    tmp11 = z3 - z2;
    /* tmp22 is descaled immediately; its odd-part partner (tmp12 below)
     * is produced at PASS1_BITS scale. */
    tmp22 = RIGHT_SHIFT(z3 - LEFT_SHIFT(z1 - z2, 1),
                        CONST_BITS-PASS1_BITS); /* c0 = (c4-c8)*2 */
    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));   /* c6 */
    tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
    tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */
    tmp20 = tmp10 + tmp12;
    tmp24 = tmp10 - tmp12;
    tmp21 = tmp11 + tmp13;
    tmp23 = tmp11 - tmp13;
    /* Odd part: inputs 1, 3, 5, 7 */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp11 = z2 + z4;
    tmp13 = z2 - z4;
    tmp12 = MULTIPLY(tmp13, FIX(0.309016994));  /* (c3-c7)/2 */
    z5 = LEFT_SHIFT(z3, CONST_BITS);
    z2 = MULTIPLY(tmp11, FIX(0.951056516));     /* (c3+c7)/2 */
    z4 = z5 + tmp12;
    tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
    tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */
    z2 = MULTIPLY(tmp11, FIX(0.587785252));     /* (c1-c9)/2 */
    z4 = z5 - tmp12 - LEFT_SHIFT(tmp13, CONST_BITS - 1);
    tmp12 = LEFT_SHIFT(z1 - tmp13 - z3, PASS1_BITS);
    tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
    tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */
    /* Final output stage: 10 workspace rows, stride 8 */
    wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*9] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*8] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*2] = (int) (tmp22 + tmp12);
    wsptr[8*7] = (int) (tmp22 - tmp12);
    wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*6] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*5] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 10 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 10; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    z3 = LEFT_SHIFT(z3, CONST_BITS);
    z4 = (INT32) wsptr[4];
    z1 = MULTIPLY(z4, FIX(1.144122806));        /* c4 */
    z2 = MULTIPLY(z4, FIX(0.437016024));        /* c8 */
    tmp10 = z3 + z1;
    tmp11 = z3 - z2;
    tmp22 = z3 - LEFT_SHIFT(z1 - z2, 1);        /* c0 = (c4-c8)*2 */
    z2 = (INT32) wsptr[2];
    z3 = (INT32) wsptr[6];
    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));   /* c6 */
    tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
    tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */
    tmp20 = tmp10 + tmp12;
    tmp24 = tmp10 - tmp12;
    tmp21 = tmp11 + tmp13;
    tmp23 = tmp11 - tmp13;
    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z3 = LEFT_SHIFT(z3, CONST_BITS);    /* pre-scale; reused in place of pass-1's z5 */
    z4 = (INT32) wsptr[7];
    tmp11 = z2 + z4;
    tmp13 = z2 - z4;
    tmp12 = MULTIPLY(tmp13, FIX(0.309016994));  /* (c3-c7)/2 */
    z2 = MULTIPLY(tmp11, FIX(0.951056516));     /* (c3+c7)/2 */
    z4 = z3 + tmp12;
    tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
    tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */
    z2 = MULTIPLY(tmp11, FIX(0.587785252));     /* (c1-c9)/2 */
    z4 = z3 - tmp12 - LEFT_SHIFT(tmp13, CONST_BITS - 1);
    tmp12 = LEFT_SHIFT(z1 - tmp13, CONST_BITS) - z3;
    tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
    tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */
    /* Final output stage: descale, mask, and look up in range_limit[] */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += 8;         /* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 11x11 output block.
 *
 * Optimized algorithm with 24 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/22).
 *
 * All 8 input columns of the 8x8 coef_block are transformed in pass 1;
 * pass 2 expands the 8-wide workspace rows to 11 output samples per row.
 * Output goes to rows output_buf[0..10] starting at output_col.
 */

GLOBAL(void)
jpeg_idct_11x11 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
                 JCOEFPTR coef_block,
                 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*11];  /* buffers data between passes: 8 columns x 11 rows */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part: inputs 0, 2, 4, 6 */
    tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp10 = LEFT_SHIFT(tmp10, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp10 += ONE << (CONST_BITS-PASS1_BITS-1);
    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132));        /* c2+c4 */
    tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045));        /* c2-c6 */
    z4 = z1 + z3;
    tmp24 = MULTIPLY(z4, - FIX(1.155664402));           /* -(c2-c10) */
    z4 -= z2;
    tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976));     /* c2 */
    tmp21 = tmp20 + tmp23 + tmp25 -
            MULTIPLY(z2, FIX(1.821790775));             /* c2+c4+c10-c6 */
    tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087));    /* c4+c6 */
    tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477));    /* c6+c8 */
    tmp24 += tmp25;
    tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120));     /* c8+c10 */
    tmp24 += MULTIPLY(z2, FIX(1.944413522)) -           /* c2+c8 */
             MULTIPLY(z1, FIX(1.390975730));            /* c4+c10 */
    tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562));     /* c0 */
    /* Odd part: inputs 1, 3, 5, 7 */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp11 = z1 + z2;
    tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */
    tmp11 = MULTIPLY(tmp11, FIX(0.887983902));          /* c3-c9 */
    tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295));        /* c5-c9 */
    tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */
    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(z1, FIX(0.923107866));             /* c7+c5+c3-c1-2*c9 */
    z1 = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579));   /* c7+c9 */
    tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588));       /* c1+c7+3*c9-c3 */
    tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623));       /* c3+c5-c7-c9 */
    z1 = MULTIPLY(z2 + z4, - FIX(1.798248910));         /* -(c1+c9) */
    tmp11 += z1;
    tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632));       /* c1+c5+c9-c7 */
    tmp14 += MULTIPLY(z2, - FIX(1.467221301)) +         /* -(c5+c9) */
             MULTIPLY(z3, FIX(1.001388905)) -           /* c1-c9 */
             MULTIPLY(z4, FIX(1.684843907));            /* c3+c9 */
    /* Final output stage: 11 workspace rows, stride 8 */
    wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*10] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*9] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*8] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*7] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*6] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*5] = (int) RIGHT_SHIFT(tmp25, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 11 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 11; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp10 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp10 = LEFT_SHIFT(tmp10, CONST_BITS);
    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[4];
    z3 = (INT32) wsptr[6];
    tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132));        /* c2+c4 */
    tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045));        /* c2-c6 */
    z4 = z1 + z3;
    tmp24 = MULTIPLY(z4, - FIX(1.155664402));           /* -(c2-c10) */
    z4 -= z2;
    tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976));     /* c2 */
    tmp21 = tmp20 + tmp23 + tmp25 -
            MULTIPLY(z2, FIX(1.821790775));             /* c2+c4+c10-c6 */
    tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087));    /* c4+c6 */
    tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477));    /* c6+c8 */
    tmp24 += tmp25;
    tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120));     /* c8+c10 */
    tmp24 += MULTIPLY(z2, FIX(1.944413522)) -           /* c2+c8 */
             MULTIPLY(z1, FIX(1.390975730));            /* c4+c10 */
    tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562));     /* c0 */
    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];
    tmp11 = z1 + z2;
    tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */
    tmp11 = MULTIPLY(tmp11, FIX(0.887983902));          /* c3-c9 */
    tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295));        /* c5-c9 */
    tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */
    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(z1, FIX(0.923107866));             /* c7+c5+c3-c1-2*c9 */
    z1 = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579));   /* c7+c9 */
    tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588));       /* c1+c7+3*c9-c3 */
    tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623));       /* c3+c5-c7-c9 */
    z1 = MULTIPLY(z2 + z4, - FIX(1.798248910));         /* -(c1+c9) */
    tmp11 += z1;
    tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632));       /* c1+c5+c9-c7 */
    tmp14 += MULTIPLY(z2, - FIX(1.467221301)) +         /* -(c5+c9) */
             MULTIPLY(z3, FIX(1.001388905)) -           /* c1-c9 */
             MULTIPLY(z4, FIX(1.684843907));            /* c3+c9 */
    /* Final output stage: descale, mask, and look up in range_limit[] */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
                                               CONST_BITS+PASS1_BITS+3)
                             & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += 8;         /* advance pointer to next row */
  }
}
  1208. /*
  1209. * Perform dequantization and inverse DCT on one block of coefficients,
  1210. * producing a 12x12 output block.
  1211. *
  1212. * Optimized algorithm with 15 multiplications in the 1-D kernel.
  1213. * cK represents sqrt(2) * cos(K*pi/24).
  1214. */
  1215. GLOBAL(void)
  1216. jpeg_idct_12x12 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1217. JCOEFPTR coef_block,
  1218. JSAMPARRAY output_buf, JDIMENSION output_col)
  1219. {
  1220. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  1221. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  1222. INT32 z1, z2, z3, z4;
  1223. JCOEFPTR inptr;
  1224. ISLOW_MULT_TYPE * quantptr;
  1225. int * wsptr;
  1226. JSAMPROW outptr;
  1227. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1228. int ctr;
  1229. int workspace[8*12]; /* buffers data between passes */
  1230. SHIFT_TEMPS
  1231. /* Pass 1: process columns from input, store into work array. */
  1232. inptr = coef_block;
  1233. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1234. wsptr = workspace;
  1235. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1236. /* Even part */
  1237. z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1238. z3 = LEFT_SHIFT(z3, CONST_BITS);
  1239. /* Add fudge factor here for final descale. */
  1240. z3 += ONE << (CONST_BITS-PASS1_BITS-1);
  1241. z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1242. z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */
  1243. tmp10 = z3 + z4;
  1244. tmp11 = z3 - z4;
  1245. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1246. z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */
  1247. z1 = LEFT_SHIFT(z1, CONST_BITS);
  1248. z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1249. z2 = LEFT_SHIFT(z2, CONST_BITS);
  1250. tmp12 = z1 - z2;
  1251. tmp21 = z3 + tmp12;
  1252. tmp24 = z3 - tmp12;
  1253. tmp12 = z4 + z2;
  1254. tmp20 = tmp10 + tmp12;
  1255. tmp25 = tmp10 - tmp12;
  1256. tmp12 = z4 - z1 - z2;
  1257. tmp22 = tmp11 + tmp12;
  1258. tmp23 = tmp11 - tmp12;
  1259. /* Odd part */
  1260. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1261. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1262. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1263. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1264. tmp11 = MULTIPLY(z2, FIX(1.306562965)); /* c3 */
  1265. tmp14 = MULTIPLY(z2, - FIX_0_541196100); /* -c9 */
  1266. tmp10 = z1 + z3;
  1267. tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
  1268. tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
  1269. tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
  1270. tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */
  1271. tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
  1272. tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
  1273. tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
  1274. MULTIPLY(z4, FIX(1.982889723)); /* c5+c7 */
  1275. z1 -= z4;
  1276. z2 -= z3;
  1277. z3 = MULTIPLY(z1 + z2, FIX_0_541196100); /* c9 */
  1278. tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */
  1279. tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */
  1280. /* Final output stage */
  1281. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1282. wsptr[8*11] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1283. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1284. wsptr[8*10] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1285. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1286. wsptr[8*9] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1287. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
  1288. wsptr[8*8] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
  1289. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1290. wsptr[8*7] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1291. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  1292. wsptr[8*6] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  1293. }
  1294. /* Pass 2: process 12 rows from work array, store into output array. */
  1295. wsptr = workspace;
  1296. for (ctr = 0; ctr < 12; ctr++) {
  1297. outptr = output_buf[ctr] + output_col;
  1298. /* Even part */
  1299. /* Add fudge factor here for final descale. */
  1300. z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1301. z3 = LEFT_SHIFT(z3, CONST_BITS);
  1302. z4 = (INT32) wsptr[4];
  1303. z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */
  1304. tmp10 = z3 + z4;
  1305. tmp11 = z3 - z4;
  1306. z1 = (INT32) wsptr[2];
  1307. z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */
  1308. z1 = LEFT_SHIFT(z1, CONST_BITS);
  1309. z2 = (INT32) wsptr[6];
  1310. z2 = LEFT_SHIFT(z2, CONST_BITS);
  1311. tmp12 = z1 - z2;
  1312. tmp21 = z3 + tmp12;
  1313. tmp24 = z3 - tmp12;
  1314. tmp12 = z4 + z2;
  1315. tmp20 = tmp10 + tmp12;
  1316. tmp25 = tmp10 - tmp12;
  1317. tmp12 = z4 - z1 - z2;
  1318. tmp22 = tmp11 + tmp12;
  1319. tmp23 = tmp11 - tmp12;
  1320. /* Odd part */
  1321. z1 = (INT32) wsptr[1];
  1322. z2 = (INT32) wsptr[3];
  1323. z3 = (INT32) wsptr[5];
  1324. z4 = (INT32) wsptr[7];
  1325. tmp11 = MULTIPLY(z2, FIX(1.306562965)); /* c3 */
  1326. tmp14 = MULTIPLY(z2, - FIX_0_541196100); /* -c9 */
  1327. tmp10 = z1 + z3;
  1328. tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
  1329. tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
  1330. tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
  1331. tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */
  1332. tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
  1333. tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
  1334. tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
  1335. MULTIPLY(z4, FIX(1.982889723)); /* c5+c7 */
  1336. z1 -= z4;
  1337. z2 -= z3;
  1338. z3 = MULTIPLY(z1 + z2, FIX_0_541196100); /* c9 */
  1339. tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */
  1340. tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */
  1341. /* Final output stage */
  1342. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1343. CONST_BITS+PASS1_BITS+3)
  1344. & RANGE_MASK];
  1345. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1346. CONST_BITS+PASS1_BITS+3)
  1347. & RANGE_MASK];
  1348. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1349. CONST_BITS+PASS1_BITS+3)
  1350. & RANGE_MASK];
  1351. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1352. CONST_BITS+PASS1_BITS+3)
  1353. & RANGE_MASK];
  1354. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1355. CONST_BITS+PASS1_BITS+3)
  1356. & RANGE_MASK];
  1357. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1358. CONST_BITS+PASS1_BITS+3)
  1359. & RANGE_MASK];
  1360. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1361. CONST_BITS+PASS1_BITS+3)
  1362. & RANGE_MASK];
  1363. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1364. CONST_BITS+PASS1_BITS+3)
  1365. & RANGE_MASK];
  1366. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1367. CONST_BITS+PASS1_BITS+3)
  1368. & RANGE_MASK];
  1369. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1370. CONST_BITS+PASS1_BITS+3)
  1371. & RANGE_MASK];
  1372. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  1373. CONST_BITS+PASS1_BITS+3)
  1374. & RANGE_MASK];
  1375. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  1376. CONST_BITS+PASS1_BITS+3)
  1377. & RANGE_MASK];
  1378. wsptr += 8; /* advance pointer to next row */
  1379. }
  1380. }
  1381. /*
  1382. * Perform dequantization and inverse DCT on one block of coefficients,
  1383. * producing a 13x13 output block.
  1384. *
  1385. * Optimized algorithm with 29 multiplications in the 1-D kernel.
  1386. * cK represents sqrt(2) * cos(K*pi/26).
  1387. */
  1388. GLOBAL(void)
  1389. jpeg_idct_13x13 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1390. JCOEFPTR coef_block,
  1391. JSAMPARRAY output_buf, JDIMENSION output_col)
  1392. {
  1393. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  1394. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  1395. INT32 z1, z2, z3, z4;
  1396. JCOEFPTR inptr;
  1397. ISLOW_MULT_TYPE * quantptr;
  1398. int * wsptr;
  1399. JSAMPROW outptr;
  1400. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1401. int ctr;
  1402. int workspace[8*13]; /* buffers data between passes */
  1403. SHIFT_TEMPS
  1404. /* Pass 1: process columns from input, store into work array. */
  1405. inptr = coef_block;
  1406. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1407. wsptr = workspace;
  1408. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1409. /* Even part */
  1410. z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1411. z1 = LEFT_SHIFT(z1, CONST_BITS);
  1412. /* Add fudge factor here for final descale. */
  1413. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  1414. z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1415. z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1416. z4 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1417. tmp10 = z3 + z4;
  1418. tmp11 = z3 - z4;
  1419. tmp12 = MULTIPLY(tmp10, FIX(1.155388986)); /* (c4+c6)/2 */
  1420. tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1; /* (c4-c6)/2 */
  1421. tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */
  1422. tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */
  1423. tmp12 = MULTIPLY(tmp10, FIX(0.316450131)); /* (c8-c12)/2 */
  1424. tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1; /* (c8+c12)/2 */
  1425. tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */
  1426. tmp25 = MULTIPLY(z2, - FIX(1.252223920)) + tmp12 + tmp13; /* c4 */
  1427. tmp12 = MULTIPLY(tmp10, FIX(0.435816023)); /* (c2-c10)/2 */
  1428. tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1; /* (c2+c10)/2 */
  1429. tmp23 = MULTIPLY(z2, - FIX(0.170464608)) - tmp12 - tmp13; /* c12 */
  1430. tmp24 = MULTIPLY(z2, - FIX(0.803364869)) + tmp12 - tmp13; /* c8 */
  1431. tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */
  1432. /* Odd part */
  1433. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1434. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1435. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1436. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1437. tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651)); /* c3 */
  1438. tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945)); /* c5 */
  1439. tmp15 = z1 + z4;
  1440. tmp13 = MULTIPLY(tmp15, FIX(0.937797057)); /* c7 */
  1441. tmp10 = tmp11 + tmp12 + tmp13 -
  1442. MULTIPLY(z1, FIX(2.020082300)); /* c7+c5+c3-c1 */
  1443. tmp14 = MULTIPLY(z2 + z3, - FIX(0.338443458)); /* -c11 */
  1444. tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564)); /* c5+c9+c11-c3 */
  1445. tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027)); /* c1+c5-c9-c11 */
  1446. tmp14 = MULTIPLY(z2 + z4, - FIX(1.163874945)); /* -c5 */
  1447. tmp11 += tmp14;
  1448. tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352)); /* c3+c5+c9-c7 */
  1449. tmp14 = MULTIPLY(z3 + z4, - FIX(0.657217813)); /* -c9 */
  1450. tmp12 += tmp14;
  1451. tmp13 += tmp14;
  1452. tmp15 = MULTIPLY(tmp15, FIX(0.338443458)); /* c11 */
  1453. tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) - /* c9-c11 */
  1454. MULTIPLY(z2, FIX(0.466105296)); /* c1-c7 */
  1455. z1 = MULTIPLY(z3 - z2, FIX(0.937797057)); /* c7 */
  1456. tmp14 += z1;
  1457. tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) - /* c3-c7 */
  1458. MULTIPLY(z4, FIX(1.742345811)); /* c1+c11 */
  1459. /* Final output stage */
  1460. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1461. wsptr[8*12] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1462. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1463. wsptr[8*11] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1464. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1465. wsptr[8*10] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1466. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
  1467. wsptr[8*9] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
  1468. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1469. wsptr[8*8] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1470. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  1471. wsptr[8*7] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  1472. wsptr[8*6] = (int) RIGHT_SHIFT(tmp26, CONST_BITS-PASS1_BITS);
  1473. }
  1474. /* Pass 2: process 13 rows from work array, store into output array. */
  1475. wsptr = workspace;
  1476. for (ctr = 0; ctr < 13; ctr++) {
  1477. outptr = output_buf[ctr] + output_col;
  1478. /* Even part */
  1479. /* Add fudge factor here for final descale. */
  1480. z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1481. z1 = LEFT_SHIFT(z1, CONST_BITS);
  1482. z2 = (INT32) wsptr[2];
  1483. z3 = (INT32) wsptr[4];
  1484. z4 = (INT32) wsptr[6];
  1485. tmp10 = z3 + z4;
  1486. tmp11 = z3 - z4;
  1487. tmp12 = MULTIPLY(tmp10, FIX(1.155388986)); /* (c4+c6)/2 */
  1488. tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1; /* (c4-c6)/2 */
  1489. tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */
  1490. tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */
  1491. tmp12 = MULTIPLY(tmp10, FIX(0.316450131)); /* (c8-c12)/2 */
  1492. tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1; /* (c8+c12)/2 */
  1493. tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */
  1494. tmp25 = MULTIPLY(z2, - FIX(1.252223920)) + tmp12 + tmp13; /* c4 */
  1495. tmp12 = MULTIPLY(tmp10, FIX(0.435816023)); /* (c2-c10)/2 */
  1496. tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1; /* (c2+c10)/2 */
  1497. tmp23 = MULTIPLY(z2, - FIX(0.170464608)) - tmp12 - tmp13; /* c12 */
  1498. tmp24 = MULTIPLY(z2, - FIX(0.803364869)) + tmp12 - tmp13; /* c8 */
  1499. tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */
  1500. /* Odd part */
  1501. z1 = (INT32) wsptr[1];
  1502. z2 = (INT32) wsptr[3];
  1503. z3 = (INT32) wsptr[5];
  1504. z4 = (INT32) wsptr[7];
  1505. tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651)); /* c3 */
  1506. tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945)); /* c5 */
  1507. tmp15 = z1 + z4;
  1508. tmp13 = MULTIPLY(tmp15, FIX(0.937797057)); /* c7 */
  1509. tmp10 = tmp11 + tmp12 + tmp13 -
  1510. MULTIPLY(z1, FIX(2.020082300)); /* c7+c5+c3-c1 */
  1511. tmp14 = MULTIPLY(z2 + z3, - FIX(0.338443458)); /* -c11 */
  1512. tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564)); /* c5+c9+c11-c3 */
  1513. tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027)); /* c1+c5-c9-c11 */
  1514. tmp14 = MULTIPLY(z2 + z4, - FIX(1.163874945)); /* -c5 */
  1515. tmp11 += tmp14;
  1516. tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352)); /* c3+c5+c9-c7 */
  1517. tmp14 = MULTIPLY(z3 + z4, - FIX(0.657217813)); /* -c9 */
  1518. tmp12 += tmp14;
  1519. tmp13 += tmp14;
  1520. tmp15 = MULTIPLY(tmp15, FIX(0.338443458)); /* c11 */
  1521. tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) - /* c9-c11 */
  1522. MULTIPLY(z2, FIX(0.466105296)); /* c1-c7 */
  1523. z1 = MULTIPLY(z3 - z2, FIX(0.937797057)); /* c7 */
  1524. tmp14 += z1;
  1525. tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) - /* c3-c7 */
  1526. MULTIPLY(z4, FIX(1.742345811)); /* c1+c11 */
  1527. /* Final output stage */
  1528. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1529. CONST_BITS+PASS1_BITS+3)
  1530. & RANGE_MASK];
  1531. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1532. CONST_BITS+PASS1_BITS+3)
  1533. & RANGE_MASK];
  1534. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1535. CONST_BITS+PASS1_BITS+3)
  1536. & RANGE_MASK];
  1537. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1538. CONST_BITS+PASS1_BITS+3)
  1539. & RANGE_MASK];
  1540. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1541. CONST_BITS+PASS1_BITS+3)
  1542. & RANGE_MASK];
  1543. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1544. CONST_BITS+PASS1_BITS+3)
  1545. & RANGE_MASK];
  1546. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1547. CONST_BITS+PASS1_BITS+3)
  1548. & RANGE_MASK];
  1549. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1550. CONST_BITS+PASS1_BITS+3)
  1551. & RANGE_MASK];
  1552. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1553. CONST_BITS+PASS1_BITS+3)
  1554. & RANGE_MASK];
  1555. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1556. CONST_BITS+PASS1_BITS+3)
  1557. & RANGE_MASK];
  1558. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  1559. CONST_BITS+PASS1_BITS+3)
  1560. & RANGE_MASK];
  1561. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  1562. CONST_BITS+PASS1_BITS+3)
  1563. & RANGE_MASK];
  1564. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26,
  1565. CONST_BITS+PASS1_BITS+3)
  1566. & RANGE_MASK];
  1567. wsptr += 8; /* advance pointer to next row */
  1568. }
  1569. }
  1570. /*
  1571. * Perform dequantization and inverse DCT on one block of coefficients,
  1572. * producing a 14x14 output block.
  1573. *
  1574. * Optimized algorithm with 20 multiplications in the 1-D kernel.
  1575. * cK represents sqrt(2) * cos(K*pi/28).
  1576. */
  1577. GLOBAL(void)
  1578. jpeg_idct_14x14 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1579. JCOEFPTR coef_block,
  1580. JSAMPARRAY output_buf, JDIMENSION output_col)
  1581. {
  1582. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  1583. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  1584. INT32 z1, z2, z3, z4;
  1585. JCOEFPTR inptr;
  1586. ISLOW_MULT_TYPE * quantptr;
  1587. int * wsptr;
  1588. JSAMPROW outptr;
  1589. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1590. int ctr;
  1591. int workspace[8*14]; /* buffers data between passes */
  1592. SHIFT_TEMPS
  1593. /* Pass 1: process columns from input, store into work array. */
  1594. inptr = coef_block;
  1595. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1596. wsptr = workspace;
  1597. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1598. /* Even part */
  1599. z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1600. z1 = LEFT_SHIFT(z1, CONST_BITS);
  1601. /* Add fudge factor here for final descale. */
  1602. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  1603. z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1604. z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */
  1605. z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */
  1606. z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */
  1607. tmp10 = z1 + z2;
  1608. tmp11 = z1 + z3;
  1609. tmp12 = z1 - z4;
  1610. tmp23 = RIGHT_SHIFT(z1 - LEFT_SHIFT(z2 + z3 - z4, 1),
  1611. CONST_BITS-PASS1_BITS); /* c0 = (c4+c12-c8)*2 */
  1612. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1613. z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1614. z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */
  1615. tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
  1616. tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
  1617. tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */
  1618. MULTIPLY(z2, FIX(1.378756276)); /* c2 */
  1619. tmp20 = tmp10 + tmp13;
  1620. tmp26 = tmp10 - tmp13;
  1621. tmp21 = tmp11 + tmp14;
  1622. tmp25 = tmp11 - tmp14;
  1623. tmp22 = tmp12 + tmp15;
  1624. tmp24 = tmp12 - tmp15;
  1625. /* Odd part */
  1626. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1627. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1628. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1629. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1630. tmp13 = LEFT_SHIFT(z4, CONST_BITS);
  1631. tmp14 = z1 + z3;
  1632. tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
  1633. tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */
  1634. tmp10 = tmp11 + tmp12 + tmp13 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
  1635. tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */
  1636. tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
  1637. z1 -= z2;
  1638. tmp15 = MULTIPLY(z1, FIX(0.467085129)) - tmp13; /* c11 */
  1639. tmp16 += tmp15;
  1640. z1 += z4;
  1641. z4 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - tmp13; /* -c13 */
  1642. tmp11 += z4 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
  1643. tmp12 += z4 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
  1644. z4 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */
  1645. tmp14 += z4 + tmp13 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
  1646. tmp15 += z4 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
  1647. tmp13 = LEFT_SHIFT(z1 - z3, PASS1_BITS);
  1648. /* Final output stage */
  1649. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1650. wsptr[8*13] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1651. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1652. wsptr[8*12] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1653. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1654. wsptr[8*11] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1655. wsptr[8*3] = (int) (tmp23 + tmp13);
  1656. wsptr[8*10] = (int) (tmp23 - tmp13);
  1657. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1658. wsptr[8*9] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1659. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  1660. wsptr[8*8] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  1661. wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS);
  1662. wsptr[8*7] = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS);
  1663. }
  1664. /* Pass 2: process 14 rows from work array, store into output array. */
  1665. wsptr = workspace;
  1666. for (ctr = 0; ctr < 14; ctr++) {
  1667. outptr = output_buf[ctr] + output_col;
  1668. /* Even part */
  1669. /* Add fudge factor here for final descale. */
  1670. z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1671. z1 = LEFT_SHIFT(z1, CONST_BITS);
  1672. z4 = (INT32) wsptr[4];
  1673. z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */
  1674. z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */
  1675. z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */
  1676. tmp10 = z1 + z2;
  1677. tmp11 = z1 + z3;
  1678. tmp12 = z1 - z4;
  1679. tmp23 = z1 - LEFT_SHIFT(z2 + z3 - z4, 1); /* c0 = (c4+c12-c8)*2 */
  1680. z1 = (INT32) wsptr[2];
  1681. z2 = (INT32) wsptr[6];
  1682. z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */
  1683. tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
  1684. tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
  1685. tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */
  1686. MULTIPLY(z2, FIX(1.378756276)); /* c2 */
  1687. tmp20 = tmp10 + tmp13;
  1688. tmp26 = tmp10 - tmp13;
  1689. tmp21 = tmp11 + tmp14;
  1690. tmp25 = tmp11 - tmp14;
  1691. tmp22 = tmp12 + tmp15;
  1692. tmp24 = tmp12 - tmp15;
  1693. /* Odd part */
  1694. z1 = (INT32) wsptr[1];
  1695. z2 = (INT32) wsptr[3];
  1696. z3 = (INT32) wsptr[5];
  1697. z4 = (INT32) wsptr[7];
  1698. z4 = LEFT_SHIFT(z4, CONST_BITS);
  1699. tmp14 = z1 + z3;
  1700. tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
  1701. tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */
  1702. tmp10 = tmp11 + tmp12 + z4 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
  1703. tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */
  1704. tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
  1705. z1 -= z2;
  1706. tmp15 = MULTIPLY(z1, FIX(0.467085129)) - z4; /* c11 */
  1707. tmp16 += tmp15;
  1708. tmp13 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - z4; /* -c13 */
  1709. tmp11 += tmp13 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
  1710. tmp12 += tmp13 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
  1711. tmp13 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */
  1712. tmp14 += tmp13 + z4 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
  1713. tmp15 += tmp13 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
  1714. tmp13 = LEFT_SHIFT(z1 - z3, CONST_BITS) + z4;
  1715. /* Final output stage */
  1716. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1717. CONST_BITS+PASS1_BITS+3)
  1718. & RANGE_MASK];
  1719. outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1720. CONST_BITS+PASS1_BITS+3)
  1721. & RANGE_MASK];
  1722. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1723. CONST_BITS+PASS1_BITS+3)
  1724. & RANGE_MASK];
  1725. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1726. CONST_BITS+PASS1_BITS+3)
  1727. & RANGE_MASK];
  1728. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1729. CONST_BITS+PASS1_BITS+3)
  1730. & RANGE_MASK];
  1731. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1732. CONST_BITS+PASS1_BITS+3)
  1733. & RANGE_MASK];
  1734. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1735. CONST_BITS+PASS1_BITS+3)
  1736. & RANGE_MASK];
  1737. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1738. CONST_BITS+PASS1_BITS+3)
  1739. & RANGE_MASK];
  1740. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1741. CONST_BITS+PASS1_BITS+3)
  1742. & RANGE_MASK];
  1743. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1744. CONST_BITS+PASS1_BITS+3)
  1745. & RANGE_MASK];
  1746. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  1747. CONST_BITS+PASS1_BITS+3)
  1748. & RANGE_MASK];
  1749. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  1750. CONST_BITS+PASS1_BITS+3)
  1751. & RANGE_MASK];
  1752. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16,
  1753. CONST_BITS+PASS1_BITS+3)
  1754. & RANGE_MASK];
  1755. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16,
  1756. CONST_BITS+PASS1_BITS+3)
  1757. & RANGE_MASK];
  1758. wsptr += 8; /* advance pointer to next row */
  1759. }
  1760. }
  1761. /*
  1762. * Perform dequantization and inverse DCT on one block of coefficients,
  1763. * producing a 15x15 output block.
  1764. *
  1765. * Optimized algorithm with 22 multiplications in the 1-D kernel.
  1766. * cK represents sqrt(2) * cos(K*pi/30).
  1767. */
  1768. GLOBAL(void)
  1769. jpeg_idct_15x15 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1770. JCOEFPTR coef_block,
  1771. JSAMPARRAY output_buf, JDIMENSION output_col)
  1772. {
  1773. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  1774. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  1775. INT32 z1, z2, z3, z4;
  1776. JCOEFPTR inptr;
  1777. ISLOW_MULT_TYPE * quantptr;
  1778. int * wsptr;
  1779. JSAMPROW outptr;
  1780. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1781. int ctr;
  1782. int workspace[8*15]; /* buffers data between passes */
  1783. SHIFT_TEMPS
  1784. /* Pass 1: process columns from input, store into work array. */
  1785. inptr = coef_block;
  1786. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1787. wsptr = workspace;
  1788. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1789. /* Even part */
  1790. z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1791. z1 = LEFT_SHIFT(z1, CONST_BITS);
  1792. /* Add fudge factor here for final descale. */
  1793. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  1794. z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1795. z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1796. z4 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1797. tmp10 = MULTIPLY(z4, FIX(0.437016024)); /* c12 */
  1798. tmp11 = MULTIPLY(z4, FIX(1.144122806)); /* c6 */
  1799. tmp12 = z1 - tmp10;
  1800. tmp13 = z1 + tmp11;
  1801. z1 -= LEFT_SHIFT(tmp11 - tmp10, 1); /* c0 = (c6-c12)*2 */
  1802. z4 = z2 - z3;
  1803. z3 += z2;
  1804. tmp10 = MULTIPLY(z3, FIX(1.337628990)); /* (c2+c4)/2 */
  1805. tmp11 = MULTIPLY(z4, FIX(0.045680613)); /* (c2-c4)/2 */
  1806. z2 = MULTIPLY(z2, FIX(1.439773946)); /* c4+c14 */
  1807. tmp20 = tmp13 + tmp10 + tmp11;
  1808. tmp23 = tmp12 - tmp10 + tmp11 + z2;
  1809. tmp10 = MULTIPLY(z3, FIX(0.547059574)); /* (c8+c14)/2 */
  1810. tmp11 = MULTIPLY(z4, FIX(0.399234004)); /* (c8-c14)/2 */
  1811. tmp25 = tmp13 - tmp10 - tmp11;
  1812. tmp26 = tmp12 + tmp10 - tmp11 - z2;
  1813. tmp10 = MULTIPLY(z3, FIX(0.790569415)); /* (c6+c12)/2 */
  1814. tmp11 = MULTIPLY(z4, FIX(0.353553391)); /* (c6-c12)/2 */
  1815. tmp21 = tmp12 + tmp10 + tmp11;
  1816. tmp24 = tmp13 - tmp10 + tmp11;
  1817. tmp11 += tmp11;
  1818. tmp22 = z1 + tmp11; /* c10 = c6-c12 */
  1819. tmp27 = z1 - tmp11 - tmp11; /* c0 = (c6-c12)*2 */
  1820. /* Odd part */
  1821. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1822. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1823. z4 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1824. z3 = MULTIPLY(z4, FIX(1.224744871)); /* c5 */
  1825. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1826. tmp13 = z2 - z4;
  1827. tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876)); /* c9 */
  1828. tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148)); /* c3-c9 */
  1829. tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */
  1830. tmp13 = MULTIPLY(z2, - FIX(0.831253876)); /* -c9 */
  1831. tmp15 = MULTIPLY(z2, - FIX(1.344997024)); /* -c3 */
  1832. z2 = z1 - z4;
  1833. tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */
  1834. tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */
  1835. tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */
  1836. tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */
  1837. z2 = MULTIPLY(z1 + z4, FIX(0.575212477)); /* c11 */
  1838. tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */
  1839. tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */
  1840. /* Final output stage */
  1841. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1842. wsptr[8*14] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1843. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1844. wsptr[8*13] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1845. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1846. wsptr[8*12] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1847. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
  1848. wsptr[8*11] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
  1849. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1850. wsptr[8*10] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1851. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  1852. wsptr[8*9] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  1853. wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS);
  1854. wsptr[8*8] = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS);
  1855. wsptr[8*7] = (int) RIGHT_SHIFT(tmp27, CONST_BITS-PASS1_BITS);
  1856. }
  1857. /* Pass 2: process 15 rows from work array, store into output array. */
  1858. wsptr = workspace;
  1859. for (ctr = 0; ctr < 15; ctr++) {
  1860. outptr = output_buf[ctr] + output_col;
  1861. /* Even part */
  1862. /* Add fudge factor here for final descale. */
  1863. z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1864. z1 = LEFT_SHIFT(z1, CONST_BITS);
  1865. z2 = (INT32) wsptr[2];
  1866. z3 = (INT32) wsptr[4];
  1867. z4 = (INT32) wsptr[6];
  1868. tmp10 = MULTIPLY(z4, FIX(0.437016024)); /* c12 */
  1869. tmp11 = MULTIPLY(z4, FIX(1.144122806)); /* c6 */
  1870. tmp12 = z1 - tmp10;
  1871. tmp13 = z1 + tmp11;
  1872. z1 -= LEFT_SHIFT(tmp11 - tmp10, 1); /* c0 = (c6-c12)*2 */
  1873. z4 = z2 - z3;
  1874. z3 += z2;
  1875. tmp10 = MULTIPLY(z3, FIX(1.337628990)); /* (c2+c4)/2 */
  1876. tmp11 = MULTIPLY(z4, FIX(0.045680613)); /* (c2-c4)/2 */
  1877. z2 = MULTIPLY(z2, FIX(1.439773946)); /* c4+c14 */
  1878. tmp20 = tmp13 + tmp10 + tmp11;
  1879. tmp23 = tmp12 - tmp10 + tmp11 + z2;
  1880. tmp10 = MULTIPLY(z3, FIX(0.547059574)); /* (c8+c14)/2 */
  1881. tmp11 = MULTIPLY(z4, FIX(0.399234004)); /* (c8-c14)/2 */
  1882. tmp25 = tmp13 - tmp10 - tmp11;
  1883. tmp26 = tmp12 + tmp10 - tmp11 - z2;
  1884. tmp10 = MULTIPLY(z3, FIX(0.790569415)); /* (c6+c12)/2 */
  1885. tmp11 = MULTIPLY(z4, FIX(0.353553391)); /* (c6-c12)/2 */
  1886. tmp21 = tmp12 + tmp10 + tmp11;
  1887. tmp24 = tmp13 - tmp10 + tmp11;
  1888. tmp11 += tmp11;
  1889. tmp22 = z1 + tmp11; /* c10 = c6-c12 */
  1890. tmp27 = z1 - tmp11 - tmp11; /* c0 = (c6-c12)*2 */
  1891. /* Odd part */
  1892. z1 = (INT32) wsptr[1];
  1893. z2 = (INT32) wsptr[3];
  1894. z4 = (INT32) wsptr[5];
  1895. z3 = MULTIPLY(z4, FIX(1.224744871)); /* c5 */
  1896. z4 = (INT32) wsptr[7];
  1897. tmp13 = z2 - z4;
  1898. tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876)); /* c9 */
  1899. tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148)); /* c3-c9 */
  1900. tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */
  1901. tmp13 = MULTIPLY(z2, - FIX(0.831253876)); /* -c9 */
  1902. tmp15 = MULTIPLY(z2, - FIX(1.344997024)); /* -c3 */
  1903. z2 = z1 - z4;
  1904. tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */
  1905. tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */
  1906. tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */
  1907. tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */
  1908. z2 = MULTIPLY(z1 + z4, FIX(0.575212477)); /* c11 */
  1909. tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */
  1910. tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */
  1911. /* Final output stage */
  1912. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1913. CONST_BITS+PASS1_BITS+3)
  1914. & RANGE_MASK];
  1915. outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1916. CONST_BITS+PASS1_BITS+3)
  1917. & RANGE_MASK];
  1918. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1919. CONST_BITS+PASS1_BITS+3)
  1920. & RANGE_MASK];
  1921. outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1922. CONST_BITS+PASS1_BITS+3)
  1923. & RANGE_MASK];
  1924. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1925. CONST_BITS+PASS1_BITS+3)
  1926. & RANGE_MASK];
  1927. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1928. CONST_BITS+PASS1_BITS+3)
  1929. & RANGE_MASK];
  1930. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1931. CONST_BITS+PASS1_BITS+3)
  1932. & RANGE_MASK];
  1933. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1934. CONST_BITS+PASS1_BITS+3)
  1935. & RANGE_MASK];
  1936. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1937. CONST_BITS+PASS1_BITS+3)
  1938. & RANGE_MASK];
  1939. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1940. CONST_BITS+PASS1_BITS+3)
  1941. & RANGE_MASK];
  1942. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  1943. CONST_BITS+PASS1_BITS+3)
  1944. & RANGE_MASK];
  1945. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  1946. CONST_BITS+PASS1_BITS+3)
  1947. & RANGE_MASK];
  1948. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16,
  1949. CONST_BITS+PASS1_BITS+3)
  1950. & RANGE_MASK];
  1951. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16,
  1952. CONST_BITS+PASS1_BITS+3)
  1953. & RANGE_MASK];
  1954. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp27,
  1955. CONST_BITS+PASS1_BITS+3)
  1956. & RANGE_MASK];
  1957. wsptr += 8; /* advance pointer to next row */
  1958. }
  1959. }
  1960. /*
  1961. * Perform dequantization and inverse DCT on one block of coefficients,
  1962. * producing a 16x16 output block.
  1963. *
  1964. * Optimized algorithm with 28 multiplications in the 1-D kernel.
  1965. * cK represents sqrt(2) * cos(K*pi/32).
  1966. */
GLOBAL(void)
jpeg_idct_16x16 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		 JCOEFPTR coef_block,
		 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*16];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Each of the 8 input columns is expanded into a 16-sample output column,
   * kept at PASS1_BITS extra fractional precision for pass 2.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    /* Add fudge factor here for final descale. */
    /* (biases the truncating RIGHT_SHIFT below so it rounds to nearest) */
    tmp0 += 1 << (CONST_BITS-PASS1_BITS-1);

    z1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    tmp1 = MULTIPLY(z1, FIX(1.306562965));      /* c4[16] = c2[8] */
    tmp2 = MULTIPLY(z1, FIX_0_541196100);       /* c12[16] = c6[8] */

    /* First butterfly stage: combine DC with the coefficient-4 products. */
    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    z3 = z1 - z2;
    z4 = MULTIPLY(z3, FIX(0.275899379));        /* c14[16] = c7[8] */
    z3 = MULTIPLY(z3, FIX(1.387039845));        /* c2[16] = c1[8] */

    /* Shared products on (z1 - z2) above save two multiplications here. */
    tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447);  /* (c6+c2)[16] = (c3+c1)[8] */
    tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223);  /* (c6-c14)[16] = (c3-c7)[8] */
    tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
    tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */

    /* Second butterfly stage: eight even-part outputs. */
    tmp20 = tmp10 + tmp0;
    tmp27 = tmp10 - tmp0;
    tmp21 = tmp12 + tmp1;
    tmp26 = tmp12 - tmp1;
    tmp22 = tmp13 + tmp2;
    tmp25 = tmp13 - tmp2;
    tmp23 = tmp11 + tmp3;
    tmp24 = tmp11 - tmp3;

    /* Odd part */
    /* NOTE: z1..z4 and tmp11 are reused/clobbered mid-sequence below;
     * the statement order is essential to the 28-multiply factorization.
     */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    tmp11 = z1 + z3;	/* temporarily holds the z1+z3 sum; overwritten below */

    tmp1  = MULTIPLY(z1 + z2, FIX(1.353318001));   /* c3 */
    tmp2  = MULTIPLY(tmp11,   FIX(1.247225013));   /* c5 */
    tmp3  = MULTIPLY(z1 + z4, FIX(1.093201867));   /* c7 */
    tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586));   /* c9 */
    tmp11 = MULTIPLY(tmp11,   FIX(0.666655658));   /* c11 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528));   /* c13 */
    tmp0  = tmp1 + tmp2 + tmp3 -
	    MULTIPLY(z1, FIX(2.286341144));        /* c7+c5+c3-c1 */
    tmp13 = tmp10 + tmp11 + tmp12 -
	    MULTIPLY(z1, FIX(1.835730603));        /* c9+c11+c13-c15 */
    z1    = MULTIPLY(z2 + z3, FIX(0.138617169));   /* c15 */
    tmp1  += z1 + MULTIPLY(z2, FIX(0.071888074));  /* c9+c11-c3-c15 */
    tmp2  += z1 - MULTIPLY(z3, FIX(1.125726048));  /* c5+c7+c15-c3 */
    z1    = MULTIPLY(z3 - z2, FIX(1.407403738));   /* c1 */
    tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282));  /* c1+c11-c9-c13 */
    tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411));  /* c1+c5+c13-c7 */
    z2    += z4;
    z1    = MULTIPLY(z2, - FIX(0.666655658));      /* -c11 */
    tmp1  += z1;
    tmp3  += z1 + MULTIPLY(z4, FIX(1.065388962));  /* c3+c11+c15-c7 */
    z2    = MULTIPLY(z2, - FIX(1.247225013));      /* -c5 */
    tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809));  /* c1+c5+c9-c13 */
    tmp12 += z2;
    z2    = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
    tmp2  += z2;
    tmp3  += z2;
    z2    = MULTIPLY(z4 - z3, FIX(0.410524528));   /* c13 */
    tmp10 += z2;
    tmp11 += z2;

    /* Final output stage.
     * Rows k and 15-k are the sum and difference of matching even/odd
     * terms; the workspace is written column-major (stride 8).
     */
    wsptr[8*0]  = (int) RIGHT_SHIFT(tmp20 + tmp0,  CONST_BITS-PASS1_BITS);
    wsptr[8*15] = (int) RIGHT_SHIFT(tmp20 - tmp0,  CONST_BITS-PASS1_BITS);
    wsptr[8*1]  = (int) RIGHT_SHIFT(tmp21 + tmp1,  CONST_BITS-PASS1_BITS);
    wsptr[8*14] = (int) RIGHT_SHIFT(tmp21 - tmp1,  CONST_BITS-PASS1_BITS);
    wsptr[8*2]  = (int) RIGHT_SHIFT(tmp22 + tmp2,  CONST_BITS-PASS1_BITS);
    wsptr[8*13] = (int) RIGHT_SHIFT(tmp22 - tmp2,  CONST_BITS-PASS1_BITS);
    wsptr[8*3]  = (int) RIGHT_SHIFT(tmp23 + tmp3,  CONST_BITS-PASS1_BITS);
    wsptr[8*12] = (int) RIGHT_SHIFT(tmp23 - tmp3,  CONST_BITS-PASS1_BITS);
    wsptr[8*4]  = (int) RIGHT_SHIFT(tmp24 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*11] = (int) RIGHT_SHIFT(tmp24 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*5]  = (int) RIGHT_SHIFT(tmp25 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*10] = (int) RIGHT_SHIFT(tmp25 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*6]  = (int) RIGHT_SHIFT(tmp26 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*9]  = (int) RIGHT_SHIFT(tmp26 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*7]  = (int) RIGHT_SHIFT(tmp27 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*8]  = (int) RIGHT_SHIFT(tmp27 - tmp13, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 16 rows from work array, store into output array.
   * Same 8-to-16 kernel as pass 1, but reading ints from the workspace and
   * descaling/range-limiting the results into JSAMPLEs.
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 16; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    /* (also folds in the +CENTERJSAMPLE level shift via the range_limit table) */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);

    z1 = (INT32) wsptr[4];
    tmp1 = MULTIPLY(z1, FIX(1.306562965));      /* c4[16] = c2[8] */
    tmp2 = MULTIPLY(z1, FIX_0_541196100);       /* c12[16] = c6[8] */

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;

    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[6];
    z3 = z1 - z2;
    z4 = MULTIPLY(z3, FIX(0.275899379));        /* c14[16] = c7[8] */
    z3 = MULTIPLY(z3, FIX(1.387039845));        /* c2[16] = c1[8] */

    tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447);  /* (c6+c2)[16] = (c3+c1)[8] */
    tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223);  /* (c6-c14)[16] = (c3-c7)[8] */
    tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
    tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */

    tmp20 = tmp10 + tmp0;
    tmp27 = tmp10 - tmp0;
    tmp21 = tmp12 + tmp1;
    tmp26 = tmp12 - tmp1;
    tmp22 = tmp13 + tmp2;
    tmp25 = tmp13 - tmp2;
    tmp23 = tmp11 + tmp3;
    tmp24 = tmp11 - tmp3;

    /* Odd part */
    /* (identical factorization to pass 1's odd part; see comments there) */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];

    tmp11 = z1 + z3;	/* temporarily holds the z1+z3 sum; overwritten below */

    tmp1  = MULTIPLY(z1 + z2, FIX(1.353318001));   /* c3 */
    tmp2  = MULTIPLY(tmp11,   FIX(1.247225013));   /* c5 */
    tmp3  = MULTIPLY(z1 + z4, FIX(1.093201867));   /* c7 */
    tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586));   /* c9 */
    tmp11 = MULTIPLY(tmp11,   FIX(0.666655658));   /* c11 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528));   /* c13 */
    tmp0  = tmp1 + tmp2 + tmp3 -
	    MULTIPLY(z1, FIX(2.286341144));        /* c7+c5+c3-c1 */
    tmp13 = tmp10 + tmp11 + tmp12 -
	    MULTIPLY(z1, FIX(1.835730603));        /* c9+c11+c13-c15 */
    z1    = MULTIPLY(z2 + z3, FIX(0.138617169));   /* c15 */
    tmp1  += z1 + MULTIPLY(z2, FIX(0.071888074));  /* c9+c11-c3-c15 */
    tmp2  += z1 - MULTIPLY(z3, FIX(1.125726048));  /* c5+c7+c15-c3 */
    z1    = MULTIPLY(z3 - z2, FIX(1.407403738));   /* c1 */
    tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282));  /* c1+c11-c9-c13 */
    tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411));  /* c1+c5+c13-c7 */
    z2    += z4;
    z1    = MULTIPLY(z2, - FIX(0.666655658));      /* -c11 */
    tmp1  += z1;
    tmp3  += z1 + MULTIPLY(z4, FIX(1.065388962));  /* c3+c11+c15-c7 */
    z2    = MULTIPLY(z2, - FIX(1.247225013));      /* -c5 */
    tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809));  /* c1+c5+c9-c13 */
    tmp12 += z2;
    z2    = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
    tmp2  += z2;
    tmp3  += z2;
    z2    = MULTIPLY(z4 - z3, FIX(0.410524528));   /* c13 */
    tmp10 += z2;
    tmp11 += z2;

    /* Final output stage: descale, range-limit via table lookup, and store.
     * RANGE_MASK keeps the index within the range_limit table for wild
     * (corrupt-data) values; samples k and 15-k come from each butterfly.
     */
    outptr[0]  = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp0,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[15] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp0,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[1]  = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp1,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp1,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[2]  = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp2,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp2,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[3]  = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp3,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp3,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[4]  = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[5]  = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[6]  = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[9]  = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[7]  = range_limit[(int) RIGHT_SHIFT(tmp27 + tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[8]  = range_limit[(int) RIGHT_SHIFT(tmp27 - tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];

    wsptr += 8;		/* advance pointer to next row */
  }
}
  2189. #endif /* IDCT_SCALING_SUPPORTED */
  2190. #endif /* DCT_ISLOW_SUPPORTED */