/*
 * jidctint.c
 *
 * Copyright (C) 1991-1998, Thomas G. Lane.
 * Modification developed 2002-2013 by Guido Vollbeding.
 * This file is part of the Independent JPEG Group's software.
 * For conditions of distribution and use, see the accompanying README file.
 *
 * This file contains a slow-but-accurate integer implementation of the
 * inverse DCT (Discrete Cosine Transform).  In the IJG code, this routine
 * must also perform dequantization of the input coefficients.
 *
 * A 2-D IDCT can be done by 1-D IDCT on each column followed by 1-D IDCT
 * on each row (or vice versa, but it's more convenient to emit a row at
 * a time).  Direct algorithms are also available, but they are much more
 * complex and seem not to be any faster when reduced to code.
 *
 * This implementation is based on an algorithm described in
 *   C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT
 *   Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics,
 *   Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991.
 * The primary algorithm described there uses 11 multiplies and 29 adds.
 * We use their alternate method with 12 multiplies and 32 adds.
 * The advantage of this method is that no data path contains more than one
 * multiplication; this allows a very simple and accurate implementation in
 * scaled fixed-point arithmetic, with a minimal number of shifts.
 *
 * We also provide IDCT routines with various output sample block sizes for
 * direct resolution reduction or enlargement and for direct resolving the
 * common 2x1 and 1x2 subsampling cases without additional resampling: NxN
 * (N=1...16), 2NxN, and Nx2N (N=1...8) pixels for one 8x8 input DCT block.
 *
 * For N<8 we simply take the corresponding low-frequency coefficients of
 * the 8x8 input DCT block and apply an NxN point IDCT on the sub-block
 * to yield the downscaled outputs.
 * This can be seen as direct low-pass downsampling from the DCT domain
 * point of view rather than the usual spatial domain point of view,
 * yielding significant computational savings and results at least
 * as good as common bilinear (averaging) spatial downsampling.
 *
 * For N>8 we apply a partial NxN IDCT on the 8 input coefficients as
 * lower frequencies and higher frequencies assumed to be zero.
 * It turns out that the computational effort is similar to the 8x8 IDCT
 * regarding the output size.
 * Furthermore, the scaling and descaling is the same for all IDCT sizes.
 *
 * CAUTION: We rely on the FIX() macro except for the N=1,2,4,8 cases
 * since there would be too many additional constants to pre-calculate.
 */
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jdct.h"		/* Private declarations for DCT subsystem */

#ifdef DCT_ISLOW_SUPPORTED


/*
 * This module is specialized to the case DCTSIZE = 8.
 */

#if DCTSIZE != 8
  Sorry, this code only copes with 8x8 DCT blocks. /* deliberate syntax err */
#endif


/*
 * The poop on this scaling stuff is as follows:
 *
 * Each 1-D IDCT step produces outputs which are a factor of sqrt(N)
 * larger than the true IDCT outputs.  The final outputs are therefore
 * a factor of N larger than desired; since N=8 this can be cured by
 * a simple right shift at the end of the algorithm.  The advantage of
 * this arrangement is that we save two multiplications per 1-D IDCT,
 * because the y0 and y4 inputs need not be divided by sqrt(N).
 *
 * We have to do addition and subtraction of the integer inputs, which
 * is no problem, and multiplication by fractional constants, which is
 * a problem to do in integer arithmetic.  We multiply all the constants
 * by CONST_SCALE and convert them to integer constants (thus retaining
 * CONST_BITS bits of precision in the constants).  After doing a
 * multiplication we have to divide the product by CONST_SCALE, with proper
 * rounding, to produce the correct output.  This division can be done
 * cheaply as a right shift of CONST_BITS bits.  We postpone shifting
 * as long as possible so that partial sums can be added together with
 * full fractional precision.
 *
 * The outputs of the first pass are scaled up by PASS1_BITS bits so that
 * they are represented to better-than-integral precision.  These outputs
 * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
 * with the recommended scaling.  (To scale up 12-bit sample data further, an
 * intermediate INT32 array would be needed.)
 *
 * To avoid overflow of the 32-bit intermediate results in pass 2, we must
 * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26.  Error analysis
 * shows that the values given below are the most effective.
 */

#if BITS_IN_JSAMPLE == 8
#define CONST_BITS  13
#define PASS1_BITS  2
#else
#define CONST_BITS  13
#define PASS1_BITS  1		/* lose a little precision to avoid overflow */
#endif

/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
 * causing a lot of useless floating-point operations at run time.
 * To get around this we use the following pre-calculated constants.
 * If you change CONST_BITS you may want to add appropriate values.
 * (With a reasonable C compiler, you can just rely on the FIX() macro...)
 */

#if CONST_BITS == 13
#define FIX_0_298631336  ((INT32)  2446)	/* FIX(0.298631336) */
#define FIX_0_390180644  ((INT32)  3196)	/* FIX(0.390180644) */
#define FIX_0_541196100  ((INT32)  4433)	/* FIX(0.541196100) */
#define FIX_0_765366865  ((INT32)  6270)	/* FIX(0.765366865) */
#define FIX_0_899976223  ((INT32)  7373)	/* FIX(0.899976223) */
#define FIX_1_175875602  ((INT32)  9633)	/* FIX(1.175875602) */
#define FIX_1_501321110  ((INT32) 12299)	/* FIX(1.501321110) */
#define FIX_1_847759065  ((INT32) 15137)	/* FIX(1.847759065) */
#define FIX_1_961570560  ((INT32) 16069)	/* FIX(1.961570560) */
#define FIX_2_053119869  ((INT32) 16819)	/* FIX(2.053119869) */
#define FIX_2_562915447  ((INT32) 20995)	/* FIX(2.562915447) */
#define FIX_3_072711026  ((INT32) 25172)	/* FIX(3.072711026) */
#else
#define FIX_0_298631336  FIX(0.298631336)
#define FIX_0_390180644  FIX(0.390180644)
#define FIX_0_541196100  FIX(0.541196100)
#define FIX_0_765366865  FIX(0.765366865)
#define FIX_0_899976223  FIX(0.899976223)
#define FIX_1_175875602  FIX(1.175875602)
#define FIX_1_501321110  FIX(1.501321110)
#define FIX_1_847759065  FIX(1.847759065)
#define FIX_1_961570560  FIX(1.961570560)
#define FIX_2_053119869  FIX(2.053119869)
#define FIX_2_562915447  FIX(2.562915447)
#define FIX_3_072711026  FIX(3.072711026)
#endif


/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result.
 * For 8-bit samples with the recommended scaling, all the variable
 * and constant values involved are no more than 16 bits wide, so a
 * 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
 * For 12-bit samples, a full 32-bit multiplication will be needed.
 */

#if BITS_IN_JSAMPLE == 8
#define MULTIPLY(var,const)  MULTIPLY16C16(var,const)
#else
#define MULTIPLY(var,const)  ((var) * (const))
#endif


/* Dequantize a coefficient by multiplying it by the multiplier-table
 * entry; produce an int result.  In this module, both inputs and result
 * are 16 bits or less, so either int or short multiply will work.
 */

#define DEQUANTIZE(coef,quantval)  (((ISLOW_MULT_TYPE) (coef)) * (quantval))


/*
 * Perform dequantization and inverse DCT on one block of coefficients.
 *
 * cK represents sqrt(2) * cos(K*pi/16).
 */
  153. GLOBAL(void)
  154. jpeg_idct_islow (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  155. JCOEFPTR coef_block,
  156. JSAMPARRAY output_buf, JDIMENSION output_col)
  157. {
  158. INT32 tmp0, tmp1, tmp2, tmp3;
  159. INT32 tmp10, tmp11, tmp12, tmp13;
  160. INT32 z1, z2, z3;
  161. JCOEFPTR inptr;
  162. ISLOW_MULT_TYPE * quantptr;
  163. int * wsptr;
  164. JSAMPROW outptr;
  165. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  166. int ctr;
  167. int workspace[DCTSIZE2]; /* buffers data between passes */
  168. SHIFT_TEMPS
  169. /* Pass 1: process columns from input, store into work array.
  170. * Note results are scaled up by sqrt(8) compared to a true IDCT;
  171. * furthermore, we scale the results by 2**PASS1_BITS.
  172. */
  173. inptr = coef_block;
  174. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  175. wsptr = workspace;
  176. for (ctr = DCTSIZE; ctr > 0; ctr--) {
  177. /* Due to quantization, we will usually find that many of the input
  178. * coefficients are zero, especially the AC terms. We can exploit this
  179. * by short-circuiting the IDCT calculation for any column in which all
  180. * the AC terms are zero. In that case each output is equal to the
  181. * DC coefficient (with scale factor as needed).
  182. * With typical images and quantization tables, half or more of the
  183. * column DCT calculations can be simplified this way.
  184. */
  185. if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
  186. inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 &&
  187. inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 &&
  188. inptr[DCTSIZE*7] == 0) {
  189. /* AC terms all zero */
  190. int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS;
  191. wsptr[DCTSIZE*0] = dcval;
  192. wsptr[DCTSIZE*1] = dcval;
  193. wsptr[DCTSIZE*2] = dcval;
  194. wsptr[DCTSIZE*3] = dcval;
  195. wsptr[DCTSIZE*4] = dcval;
  196. wsptr[DCTSIZE*5] = dcval;
  197. wsptr[DCTSIZE*6] = dcval;
  198. wsptr[DCTSIZE*7] = dcval;
  199. inptr++; /* advance pointers to next column */
  200. quantptr++;
  201. wsptr++;
  202. continue;
  203. }
  204. /* Even part: reverse the even part of the forward DCT.
  205. * The rotator is c(-6).
  206. */
  207. z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  208. z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  209. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  210. tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
  211. tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */
  212. z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  213. z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  214. z2 <<= CONST_BITS;
  215. z3 <<= CONST_BITS;
  216. /* Add fudge factor here for final descale. */
  217. z2 += ONE << (CONST_BITS-PASS1_BITS-1);
  218. tmp0 = z2 + z3;
  219. tmp1 = z2 - z3;
  220. tmp10 = tmp0 + tmp2;
  221. tmp13 = tmp0 - tmp2;
  222. tmp11 = tmp1 + tmp3;
  223. tmp12 = tmp1 - tmp3;
  224. /* Odd part per figure 8; the matrix is unitary and hence its
  225. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  226. */
  227. tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  228. tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  229. tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  230. tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  231. z2 = tmp0 + tmp2;
  232. z3 = tmp1 + tmp3;
  233. z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* c3 */
  234. z2 = MULTIPLY(z2, - FIX_1_961570560); /* -c3-c5 */
  235. z3 = MULTIPLY(z3, - FIX_0_390180644); /* -c3+c5 */
  236. z2 += z1;
  237. z3 += z1;
  238. z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
  239. tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* -c1+c3+c5-c7 */
  240. tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* c1+c3-c5-c7 */
  241. tmp0 += z1 + z2;
  242. tmp3 += z1 + z3;
  243. z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
  244. tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* c1+c3-c5+c7 */
  245. tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* c1+c3+c5-c7 */
  246. tmp1 += z1 + z3;
  247. tmp2 += z1 + z2;
  248. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  249. wsptr[DCTSIZE*0] = (int) RIGHT_SHIFT(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
  250. wsptr[DCTSIZE*7] = (int) RIGHT_SHIFT(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
  251. wsptr[DCTSIZE*1] = (int) RIGHT_SHIFT(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
  252. wsptr[DCTSIZE*6] = (int) RIGHT_SHIFT(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
  253. wsptr[DCTSIZE*2] = (int) RIGHT_SHIFT(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
  254. wsptr[DCTSIZE*5] = (int) RIGHT_SHIFT(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
  255. wsptr[DCTSIZE*3] = (int) RIGHT_SHIFT(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
  256. wsptr[DCTSIZE*4] = (int) RIGHT_SHIFT(tmp13 - tmp0, CONST_BITS-PASS1_BITS);
  257. inptr++; /* advance pointers to next column */
  258. quantptr++;
  259. wsptr++;
  260. }
  261. /* Pass 2: process rows from work array, store into output array.
  262. * Note that we must descale the results by a factor of 8 == 2**3,
  263. * and also undo the PASS1_BITS scaling.
  264. */
  265. wsptr = workspace;
  266. for (ctr = 0; ctr < DCTSIZE; ctr++) {
  267. outptr = output_buf[ctr] + output_col;
  268. /* Rows of zeroes can be exploited in the same way as we did with columns.
  269. * However, the column calculation has created many nonzero AC terms, so
  270. * the simplification applies less often (typically 5% to 10% of the time).
  271. * On machines with very fast multiplication, it's possible that the
  272. * test takes more time than it's worth. In that case this section
  273. * may be commented out.
  274. */
  275. #ifndef NO_ZERO_ROW_TEST
  276. if (wsptr[1] == 0 && wsptr[2] == 0 && wsptr[3] == 0 && wsptr[4] == 0 &&
  277. wsptr[5] == 0 && wsptr[6] == 0 && wsptr[7] == 0) {
  278. /* AC terms all zero */
  279. JSAMPLE dcval = range_limit[(int) DESCALE((INT32) wsptr[0], PASS1_BITS+3)
  280. & RANGE_MASK];
  281. outptr[0] = dcval;
  282. outptr[1] = dcval;
  283. outptr[2] = dcval;
  284. outptr[3] = dcval;
  285. outptr[4] = dcval;
  286. outptr[5] = dcval;
  287. outptr[6] = dcval;
  288. outptr[7] = dcval;
  289. wsptr += DCTSIZE; /* advance pointer to next row */
  290. continue;
  291. }
  292. #endif
  293. /* Even part: reverse the even part of the forward DCT.
  294. * The rotator is c(-6).
  295. */
  296. z2 = (INT32) wsptr[2];
  297. z3 = (INT32) wsptr[6];
  298. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  299. tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
  300. tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */
  301. /* Add fudge factor here for final descale. */
  302. z2 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  303. z3 = (INT32) wsptr[4];
  304. tmp0 = (z2 + z3) << CONST_BITS;
  305. tmp1 = (z2 - z3) << CONST_BITS;
  306. tmp10 = tmp0 + tmp2;
  307. tmp13 = tmp0 - tmp2;
  308. tmp11 = tmp1 + tmp3;
  309. tmp12 = tmp1 - tmp3;
  310. /* Odd part per figure 8; the matrix is unitary and hence its
  311. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  312. */
  313. tmp0 = (INT32) wsptr[7];
  314. tmp1 = (INT32) wsptr[5];
  315. tmp2 = (INT32) wsptr[3];
  316. tmp3 = (INT32) wsptr[1];
  317. z2 = tmp0 + tmp2;
  318. z3 = tmp1 + tmp3;
  319. z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* c3 */
  320. z2 = MULTIPLY(z2, - FIX_1_961570560); /* -c3-c5 */
  321. z3 = MULTIPLY(z3, - FIX_0_390180644); /* -c3+c5 */
  322. z2 += z1;
  323. z3 += z1;
  324. z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
  325. tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* -c1+c3+c5-c7 */
  326. tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* c1+c3-c5-c7 */
  327. tmp0 += z1 + z2;
  328. tmp3 += z1 + z3;
  329. z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
  330. tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* c1+c3-c5+c7 */
  331. tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* c1+c3+c5-c7 */
  332. tmp1 += z1 + z3;
  333. tmp2 += z1 + z2;
  334. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  335. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp3,
  336. CONST_BITS+PASS1_BITS+3)
  337. & RANGE_MASK];
  338. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp3,
  339. CONST_BITS+PASS1_BITS+3)
  340. & RANGE_MASK];
  341. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp2,
  342. CONST_BITS+PASS1_BITS+3)
  343. & RANGE_MASK];
  344. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp2,
  345. CONST_BITS+PASS1_BITS+3)
  346. & RANGE_MASK];
  347. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp1,
  348. CONST_BITS+PASS1_BITS+3)
  349. & RANGE_MASK];
  350. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp1,
  351. CONST_BITS+PASS1_BITS+3)
  352. & RANGE_MASK];
  353. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp0,
  354. CONST_BITS+PASS1_BITS+3)
  355. & RANGE_MASK];
  356. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp0,
  357. CONST_BITS+PASS1_BITS+3)
  358. & RANGE_MASK];
  359. wsptr += DCTSIZE; /* advance pointer to next row */
  360. }
  361. }
  362. #ifdef IDCT_SCALING_SUPPORTED
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 7x7 output block.
 *
 * Optimized algorithm with 12 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/14).
 *
 * Inputs: coef_block holds the quantized coefficients; only the top-left
 * 7x7 of them are referenced.  compptr->dct_table supplies the scaled
 * dequantization multipliers.  The 49 output samples are written to
 * output_buf rows 0..6 starting at column output_col.
 */

GLOBAL(void)
jpeg_idct_7x7 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[7*7];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Workspace entries leave this pass scaled up by PASS1_BITS.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 7; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    tmp13 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp13 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    /* (Rounding bias so the RIGHT_SHIFT below rounds instead of truncating.) */
    tmp13 += ONE << (CONST_BITS-PASS1_BITS-1);

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734));       /* c4 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123));       /* c6 */
    tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
    tmp0 = z1 + z3;
    z2 -= tmp0;
    tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13;   /* c2 */
    tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536));    /* c2-c4-c6 */
    tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249));    /* c2+c4+c6 */
    tmp13 += MULTIPLY(z2, FIX(1.414213562));           /* c0 */

    /* Odd part */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);

    tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347));        /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339));        /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(z2 + z3, - FIX(1.378756276));      /* -c1 */
    tmp1 += tmp2;
    z2 = MULTIPLY(z1 + z3, FIX(0.613604268));          /* c5 */
    tmp0 += z2;
    tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693));       /* c3+c1-c5 */

    /* Final output stage: descale from CONST_BITS to PASS1_BITS scaling. */
    wsptr[7*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[7*6] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[7*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[7*5] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[7*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[7*4] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
    wsptr[7*3] = (int) RIGHT_SHIFT(tmp13, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 7 rows from work array, store into output array.
   * The final shift of CONST_BITS+PASS1_BITS+3 removes the CONST_BITS
   * multiplier scaling, the PASS1_BITS pass-1 scaling, and the factor
   * of 8 inherent in the 2-D IDCT, in one operation.
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 7; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp13 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp13 <<= CONST_BITS;

    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[4];
    z3 = (INT32) wsptr[6];

    tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734));       /* c4 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123));       /* c6 */
    tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
    tmp0 = z1 + z3;
    z2 -= tmp0;
    tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13;   /* c2 */
    tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536));    /* c2-c4-c6 */
    tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249));    /* c2+c4+c6 */
    tmp13 += MULTIPLY(z2, FIX(1.414213562));           /* c0 */

    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];

    tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347));        /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339));        /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(z2 + z3, - FIX(1.378756276));      /* -c1 */
    tmp1 += tmp2;
    z2 = MULTIPLY(z1 + z3, FIX(0.613604268));          /* c5 */
    tmp0 += z2;
    tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693));       /* c3+c1-c5 */

    /* Final output stage: descale, clamp to sample range via range_limit. */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];

    wsptr += 7;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 6x6 output block.
 *
 * Optimized algorithm with 3 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/12).
 *
 * Only the top-left 6x6 coefficients of coef_block are referenced;
 * the 36 output samples go to output_buf rows 0..5 at column output_col.
 */

GLOBAL(void)
jpeg_idct_6x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[6*6];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Workspace entries leave this pass scaled up by PASS1_BITS.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    tmp10 = MULTIPLY(tmp2, FIX(0.707106781));   /* c4 */
    tmp1 = tmp0 + tmp10;
    /* tmp11 is descaled immediately: rows 1 and 4 are stored below
     * without a further RIGHT_SHIFT.
     */
    tmp11 = RIGHT_SHIFT(tmp0 - tmp10 - tmp10, CONST_BITS-PASS1_BITS);
    tmp10 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp0 = MULTIPLY(tmp10, FIX(1.224744871));   /* c2 */
    tmp10 = tmp1 + tmp0;
    tmp12 = tmp1 - tmp0;

    /* Odd part */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp0 = tmp1 + ((z1 + z2) << CONST_BITS);
    tmp2 = tmp1 + ((z3 - z2) << CONST_BITS);
    /* Note PASS1_BITS (not CONST_BITS) here: tmp1 pairs with the
     * already-descaled tmp11, so it is produced at PASS1_BITS scale.
     */
    tmp1 = (z1 - z2 - z3) << PASS1_BITS;

    /* Final output stage */
    wsptr[6*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[6*5] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[6*1] = (int) (tmp11 + tmp1);
    wsptr[6*4] = (int) (tmp11 - tmp1);
    wsptr[6*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[6*3] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 6 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 <<= CONST_BITS;
    tmp2 = (INT32) wsptr[4];
    tmp10 = MULTIPLY(tmp2, FIX(0.707106781));   /* c4 */
    tmp1 = tmp0 + tmp10;
    tmp11 = tmp0 - tmp10 - tmp10;
    tmp10 = (INT32) wsptr[2];
    tmp0 = MULTIPLY(tmp10, FIX(1.224744871));   /* c2 */
    tmp10 = tmp1 + tmp0;
    tmp12 = tmp1 - tmp0;

    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp0 = tmp1 + ((z1 + z2) << CONST_BITS);
    tmp2 = tmp1 + ((z3 - z2) << CONST_BITS);
    /* Here everything is at full CONST_BITS scale (all outputs get the
     * combined CONST_BITS+PASS1_BITS+3 shift), hence CONST_BITS.
     */
    tmp1 = (z1 - z2 - z3) << CONST_BITS;

    /* Final output stage */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];

    wsptr += 6;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 5x5 output block.
 *
 * Optimized algorithm with 5 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/10).
 *
 * Only the top-left 5x5 coefficients of coef_block are referenced;
 * the 25 output samples go to output_buf rows 0..4 at column output_col.
 */

GLOBAL(void)
jpeg_idct_5x5 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp10, tmp11, tmp12;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[5*5];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Workspace entries leave this pass scaled up by PASS1_BITS.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    tmp12 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp12 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    tmp12 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */
    z3 = tmp12 + z2;
    tmp10 = z3 + z1;
    tmp11 = z3 - z1;
    tmp12 -= z2 << 2;	/* i.e. DC + (c4-c2)*2 * (tmp0-tmp1) term */

    /* Odd part */
    z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));     /* c3 */
    tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148));   /* c1-c3 */
    tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899));   /* c1+c3 */

    /* Final output stage */
    wsptr[5*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[5*4] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[5*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[5*3] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[5*2] = (int) RIGHT_SHIFT(tmp12, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 5 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp12 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp12 <<= CONST_BITS;
    tmp0 = (INT32) wsptr[2];
    tmp1 = (INT32) wsptr[4];
    z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */
    z3 = tmp12 + z2;
    tmp10 = z3 + z1;
    tmp11 = z3 - z1;
    tmp12 -= z2 << 2;

    /* Odd part */
    z2 = (INT32) wsptr[1];
    z3 = (INT32) wsptr[3];

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));     /* c3 */
    tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148));   /* c1-c3 */
    tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899));   /* c1+c3 */

    /* Final output stage: descale and range-limit. */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];

    wsptr += 5;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 4x4 output block.
 *
 * Optimized algorithm with 3 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT].
 *
 * Only the top-left 4x4 coefficients of coef_block are referenced;
 * the 16 output samples go to output_buf rows 0..3 at column output_col.
 */

GLOBAL(void)
jpeg_idct_4x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp2, tmp10, tmp12;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[4*4];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Workspace entries leave this pass scaled up by PASS1_BITS.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 4; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    /* The even part needs no multiplications: just a sum/difference
     * butterfly, scaled directly to PASS1_BITS.
     */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);

    tmp10 = (tmp0 + tmp2) << PASS1_BITS;
    tmp12 = (tmp0 - tmp2) << PASS1_BITS;

    /* Odd part */
    /* Same rotation as in the even part of the 8x8 LL&M IDCT */
    z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);       /* c6 */
    /* Add fudge factor here for final descale. */
    /* (Folded into z1 so both tmp0 and tmp2 below are rounded.) */
    z1 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp0 = RIGHT_SHIFT(z1 + MULTIPLY(z2, FIX_0_765366865), /* c2-c6 */
		       CONST_BITS-PASS1_BITS);
    tmp2 = RIGHT_SHIFT(z1 - MULTIPLY(z3, FIX_1_847759065), /* c2+c6 */
		       CONST_BITS-PASS1_BITS);

    /* Final output stage */
    wsptr[4*0] = (int) (tmp10 + tmp0);
    wsptr[4*3] = (int) (tmp10 - tmp0);
    wsptr[4*1] = (int) (tmp12 + tmp2);
    wsptr[4*2] = (int) (tmp12 - tmp2);
  }

  /* Pass 2: process 4 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 4; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp2 = (INT32) wsptr[2];

    tmp10 = (tmp0 + tmp2) << CONST_BITS;
    tmp12 = (tmp0 - tmp2) << CONST_BITS;

    /* Odd part */
    /* Same rotation as in the even part of the 8x8 LL&M IDCT */
    z2 = (INT32) wsptr[1];
    z3 = (INT32) wsptr[3];

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);       /* c6 */
    tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865);     /* c2-c6 */
    tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065);     /* c2+c6 */

    /* Final output stage: descale and range-limit. */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];

    wsptr += 4;		/* advance pointer to next row */
  }
}
  765. /*
  766. * Perform dequantization and inverse DCT on one block of coefficients,
  767. * producing a reduced-size 3x3 output block.
  768. *
  769. * Optimized algorithm with 2 multiplications in the 1-D kernel.
  770. * cK represents sqrt(2) * cos(K*pi/6).
  771. */
  772. GLOBAL(void)
  773. jpeg_idct_3x3 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  774. JCOEFPTR coef_block,
  775. JSAMPARRAY output_buf, JDIMENSION output_col)
  776. {
  777. INT32 tmp0, tmp2, tmp10, tmp12;
  778. JCOEFPTR inptr;
  779. ISLOW_MULT_TYPE * quantptr;
  780. int * wsptr;
  781. JSAMPROW outptr;
  782. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  783. int ctr;
  784. int workspace[3*3]; /* buffers data between passes */
  785. SHIFT_TEMPS
  786. /* Pass 1: process columns from input, store into work array. */
  787. inptr = coef_block;
  788. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  789. wsptr = workspace;
  790. for (ctr = 0; ctr < 3; ctr++, inptr++, quantptr++, wsptr++) {
  791. /* Even part */
  792. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  793. tmp0 <<= CONST_BITS;
  794. /* Add fudge factor here for final descale. */
  795. tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
  796. tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  797. tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */
  798. tmp10 = tmp0 + tmp12;
  799. tmp2 = tmp0 - tmp12 - tmp12;
  800. /* Odd part */
  801. tmp12 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  802. tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */
  803. /* Final output stage */
  804. wsptr[3*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
  805. wsptr[3*2] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
  806. wsptr[3*1] = (int) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS);
  807. }
  808. /* Pass 2: process 3 rows from work array, store into output array. */
  809. wsptr = workspace;
  810. for (ctr = 0; ctr < 3; ctr++) {
  811. outptr = output_buf[ctr] + output_col;
  812. /* Even part */
  813. /* Add fudge factor here for final descale. */
  814. tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  815. tmp0 <<= CONST_BITS;
  816. tmp2 = (INT32) wsptr[2];
  817. tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */
  818. tmp10 = tmp0 + tmp12;
  819. tmp2 = tmp0 - tmp12 - tmp12;
  820. /* Odd part */
  821. tmp12 = (INT32) wsptr[1];
  822. tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */
  823. /* Final output stage */
  824. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  825. CONST_BITS+PASS1_BITS+3)
  826. & RANGE_MASK];
  827. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  828. CONST_BITS+PASS1_BITS+3)
  829. & RANGE_MASK];
  830. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2,
  831. CONST_BITS+PASS1_BITS+3)
  832. & RANGE_MASK];
  833. wsptr += 3; /* advance pointer to next row */
  834. }
  835. }
  836. /*
  837. * Perform dequantization and inverse DCT on one block of coefficients,
  838. * producing a reduced-size 2x2 output block.
  839. *
  840. * Multiplication-less algorithm.
  841. */
  842. GLOBAL(void)
  843. jpeg_idct_2x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  844. JCOEFPTR coef_block,
  845. JSAMPARRAY output_buf, JDIMENSION output_col)
  846. {
  847. INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  848. ISLOW_MULT_TYPE * quantptr;
  849. JSAMPROW outptr;
  850. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  851. SHIFT_TEMPS
  852. /* Pass 1: process columns from input. */
  853. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  854. /* Column 0 */
  855. tmp4 = DEQUANTIZE(coef_block[DCTSIZE*0], quantptr[DCTSIZE*0]);
  856. tmp5 = DEQUANTIZE(coef_block[DCTSIZE*1], quantptr[DCTSIZE*1]);
  857. /* Add fudge factor here for final descale. */
  858. tmp4 += ONE << 2;
  859. tmp0 = tmp4 + tmp5;
  860. tmp2 = tmp4 - tmp5;
  861. /* Column 1 */
  862. tmp4 = DEQUANTIZE(coef_block[DCTSIZE*0+1], quantptr[DCTSIZE*0+1]);
  863. tmp5 = DEQUANTIZE(coef_block[DCTSIZE*1+1], quantptr[DCTSIZE*1+1]);
  864. tmp1 = tmp4 + tmp5;
  865. tmp3 = tmp4 - tmp5;
  866. /* Pass 2: process 2 rows, store into output array. */
  867. /* Row 0 */
  868. outptr = output_buf[0] + output_col;
  869. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp0 + tmp1, 3) & RANGE_MASK];
  870. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp0 - tmp1, 3) & RANGE_MASK];
  871. /* Row 1 */
  872. outptr = output_buf[1] + output_col;
  873. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp2 + tmp3, 3) & RANGE_MASK];
  874. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2 - tmp3, 3) & RANGE_MASK];
  875. }
  876. /*
  877. * Perform dequantization and inverse DCT on one block of coefficients,
  878. * producing a reduced-size 1x1 output block.
  879. *
  880. * We hardly need an inverse DCT routine for this: just take the
  881. * average pixel value, which is one-eighth of the DC coefficient.
  882. */
  883. GLOBAL(void)
  884. jpeg_idct_1x1 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  885. JCOEFPTR coef_block,
  886. JSAMPARRAY output_buf, JDIMENSION output_col)
  887. {
  888. int dcval;
  889. ISLOW_MULT_TYPE * quantptr;
  890. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  891. SHIFT_TEMPS
  892. /* 1x1 is trivial: just take the DC coefficient divided by 8. */
  893. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  894. dcval = DEQUANTIZE(coef_block[0], quantptr[0]);
  895. dcval = (int) DESCALE((INT32) dcval, 3);
  896. output_buf[0][output_col] = range_limit[dcval & RANGE_MASK];
  897. }
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 9x9 output block.
 *
 * Optimized algorithm with 10 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/18).
 *
 * The input block is still 8x8; pass 1 transforms the 8 input columns
 * into 9-row columns (workspace is 8 wide by 9 high), and pass 2
 * expands each of the 9 rows to 9 output samples.
 */

GLOBAL(void)
jpeg_idct_9x9 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*9];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Workspace entries leave this pass scaled up by PASS1_BITS.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    tmp3 = MULTIPLY(z3, FIX(0.707106781));      /* c6 */
    tmp1 = tmp0 + tmp3;
    tmp2 = tmp0 - tmp3 - tmp3;

    tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */
    tmp11 = tmp2 + tmp0;
    tmp14 = tmp2 - tmp0 - tmp0;	/* center output (row 4) */

    tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */
    tmp2 = MULTIPLY(z1, FIX(1.083350441));      /* c4 */
    tmp3 = MULTIPLY(z2, FIX(0.245575608));      /* c8 */

    tmp10 = tmp1 + tmp0 - tmp3;
    tmp12 = tmp1 - tmp0 + tmp2;
    tmp13 = tmp1 - tmp2 + tmp3;

    /* Odd part */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    z2 = MULTIPLY(z2, - FIX(1.224744871));      /* -c3 */

    tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */
    tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */
    tmp0 = tmp2 + tmp3 - z2;
    tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */
    tmp2 += z2 - tmp1;
    tmp3 += z2 + tmp1;
    tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */

    /* Final output stage: descale to PASS1_BITS scaling. */
    wsptr[8*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[8*8] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[8*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[8*7] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[8*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[8*6] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
    wsptr[8*3] = (int) RIGHT_SHIFT(tmp13 + tmp3, CONST_BITS-PASS1_BITS);
    wsptr[8*5] = (int) RIGHT_SHIFT(tmp13 - tmp3, CONST_BITS-PASS1_BITS);
    wsptr[8*4] = (int) RIGHT_SHIFT(tmp14, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 9 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 9; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 <<= CONST_BITS;

    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[4];
    z3 = (INT32) wsptr[6];

    tmp3 = MULTIPLY(z3, FIX(0.707106781));      /* c6 */
    tmp1 = tmp0 + tmp3;
    tmp2 = tmp0 - tmp3 - tmp3;

    tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */
    tmp11 = tmp2 + tmp0;
    tmp14 = tmp2 - tmp0 - tmp0;

    tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */
    tmp2 = MULTIPLY(z1, FIX(1.083350441));      /* c4 */
    tmp3 = MULTIPLY(z2, FIX(0.245575608));      /* c8 */

    tmp10 = tmp1 + tmp0 - tmp3;
    tmp12 = tmp1 - tmp0 + tmp2;
    tmp13 = tmp1 - tmp2 + tmp3;

    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];

    z2 = MULTIPLY(z2, - FIX(1.224744871));      /* -c3 */

    tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */
    tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */
    tmp0 = tmp2 + tmp3 - z2;
    tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */
    tmp2 += z2 - tmp1;
    tmp3 += z2 + tmp1;
    tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */

    /* Final output stage: descale and range-limit. */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp3,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp3,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp14,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];

    wsptr += 8;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 10x10 output block.
 *
 * Optimized algorithm with 12 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/20).
 *
 * Structure: two 1-D IDCT passes over an 8x10 intermediate workspace.
 * Pass 1 transforms each of the 8 input columns (8 coefficients in,
 * 10 samples out, kept at scaled precision of PASS1_BITS extra bits);
 * pass 2 transforms each of the 10 rows, descales, and range-limits
 * the samples into output_buf at the given column offset.
 *
 * cinfo      - decompression object; supplies the sample range-limit table.
 * compptr    - component whose dct_table holds the dequantization multipliers.
 * coef_block - 8x8 block of quantized DCT coefficients (column-major access
 *              via DCTSIZE strides).
 * output_buf/output_col - destination sample rows and starting column.
 */

GLOBAL(void)
jpeg_idct_10x10 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		 JCOEFPTR coef_block,
		 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24;
  INT32 z1, z2, z3, z4, z5;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*10];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    /* Biases the later right shift so truncation rounds to nearest. */
    z3 += ONE << (CONST_BITS-PASS1_BITS-1);
    z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z1 = MULTIPLY(z4, FIX(1.144122806));         /* c4 */
    z2 = MULTIPLY(z4, FIX(0.437016024));         /* c8 */
    tmp10 = z3 + z1;
    tmp11 = z3 - z2;

    tmp22 = RIGHT_SHIFT(z3 - ((z1 - z2) << 1),   /* c0 = (c4-c8)*2 */
			CONST_BITS-PASS1_BITS);

    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));    /* c6 */
    tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
    tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */

    tmp20 = tmp10 + tmp12;
    tmp24 = tmp10 - tmp12;
    tmp21 = tmp11 + tmp13;
    tmp23 = tmp11 - tmp13;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    tmp11 = z2 + z4;
    tmp13 = z2 - z4;

    tmp12 = MULTIPLY(tmp13, FIX(0.309016994));   /* (c3-c7)/2 */
    z5 = z3 << CONST_BITS;

    z2 = MULTIPLY(tmp11, FIX(0.951056516));      /* (c3+c7)/2 */
    z4 = z5 + tmp12;

    tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
    tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */

    z2 = MULTIPLY(tmp11, FIX(0.587785252));      /* (c1-c9)/2 */
    z4 = z5 - tmp12 - (tmp13 << (CONST_BITS - 1));

    /* Center output needs no multiply; shift up to workspace scale directly. */
    tmp12 = (z1 - tmp13 - z3) << PASS1_BITS;

    tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
    tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */

    /* Final output stage: butterflies pair sample k with sample 9-k. */

    wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*9] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*8] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*2] = (int) (tmp22 + tmp12);
    wsptr[8*7] = (int) (tmp22 - tmp12);
    wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*6] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*5] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 10 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 10; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    /* The +2^(PASS1_BITS+2) also folds in the level shift to unsigned. */
    z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    z3 <<= CONST_BITS;
    z4 = (INT32) wsptr[4];
    z1 = MULTIPLY(z4, FIX(1.144122806));         /* c4 */
    z2 = MULTIPLY(z4, FIX(0.437016024));         /* c8 */
    tmp10 = z3 + z1;
    tmp11 = z3 - z2;

    tmp22 = z3 - ((z1 - z2) << 1);               /* c0 = (c4-c8)*2 */

    z2 = (INT32) wsptr[2];
    z3 = (INT32) wsptr[6];

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));    /* c6 */
    tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
    tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */

    tmp20 = tmp10 + tmp12;
    tmp24 = tmp10 - tmp12;
    tmp21 = tmp11 + tmp13;
    tmp23 = tmp11 - tmp13;

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z3 <<= CONST_BITS;
    z4 = (INT32) wsptr[7];

    tmp11 = z2 + z4;
    tmp13 = z2 - z4;

    tmp12 = MULTIPLY(tmp13, FIX(0.309016994));   /* (c3-c7)/2 */

    z2 = MULTIPLY(tmp11, FIX(0.951056516));      /* (c3+c7)/2 */
    z4 = z3 + tmp12;

    tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
    tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */

    z2 = MULTIPLY(tmp11, FIX(0.587785252));      /* (c1-c9)/2 */
    z4 = z3 - tmp12 - (tmp13 << (CONST_BITS - 1));

    tmp12 = ((z1 - tmp13) << CONST_BITS) - z3;

    tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
    tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */

    /* Final output stage: descale, mask, and clamp through range_limit. */

    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];

    wsptr += 8;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing an 11x11 output block.
 *
 * Optimized algorithm with 24 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/22).
 *
 * Two-pass structure over an 8x11 workspace: pass 1 expands each of the
 * 8 input columns to 11 intermediate samples (kept at PASS1_BITS extra
 * precision); pass 2 transforms the 11 rows, descales, and range-limits
 * into output_buf at output_col.  Odd length means the center sample
 * (index 5) has no butterfly partner and is emitted alone from tmp25.
 *
 * cinfo      - decompression object; supplies the sample range-limit table.
 * compptr    - component whose dct_table holds the dequantization multipliers.
 * coef_block - 8x8 block of quantized DCT coefficients.
 * output_buf/output_col - destination sample rows and starting column.
 */

GLOBAL(void)
jpeg_idct_11x11 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		 JCOEFPTR coef_block,
		 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*11];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp10 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    /* Biases the later right shift so truncation rounds to nearest. */
    tmp10 += ONE << (CONST_BITS-PASS1_BITS-1);

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132));     /* c2+c4 */
    tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045));     /* c2-c6 */
    z4 = z1 + z3;
    tmp24 = MULTIPLY(z4, - FIX(1.155664402));        /* -(c2-c10) */
    z4 -= z2;
    tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976));  /* c2 */
    tmp21 = tmp20 + tmp23 + tmp25 -
	    MULTIPLY(z2, FIX(1.821790775));          /* c2+c4+c10-c6 */
    tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087)); /* c4+c6 */
    tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477)); /* c6+c8 */
    tmp24 += tmp25;
    tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120));  /* c8+c10 */
    tmp24 += MULTIPLY(z2, FIX(1.944413522)) -        /* c2+c8 */
	     MULTIPLY(z1, FIX(1.390975730));         /* c4+c10 */
    /* tmp25 is finalized last: center sample of the even half. */
    tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562));  /* c0 */

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    tmp11 = z1 + z2;
    tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */
    tmp11 = MULTIPLY(tmp11, FIX(0.887983902));           /* c3-c9 */
    tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295));         /* c5-c9 */
    tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */
    tmp10 = tmp11 + tmp12 + tmp13 -
	    MULTIPLY(z1, FIX(0.923107866));              /* c7+c5+c3-c1-2*c9 */
    z1    = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579)); /* c7+c9 */
    tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588));        /* c1+c7+3*c9-c3 */
    tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623));        /* c3+c5-c7-c9 */
    z1    = MULTIPLY(z2 + z4, - FIX(1.798248910));       /* -(c1+c9) */
    tmp11 += z1;
    tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632));        /* c1+c5+c9-c7 */
    tmp14 += MULTIPLY(z2, - FIX(1.467221301)) +          /* -(c5+c9) */
	     MULTIPLY(z3, FIX(1.001388905)) -            /* c1-c9 */
	     MULTIPLY(z4, FIX(1.684843907));             /* c3+c9 */

    /* Final output stage: butterflies pair sample k with sample 10-k;
     * sample 5 is the unpaired center.
     */

    wsptr[8*0]  = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*10] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*1]  = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*9]  = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*2]  = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*8]  = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*3]  = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*7]  = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*4]  = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*6]  = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*5]  = (int) RIGHT_SHIFT(tmp25, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 11 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 11; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    /* The +2^(PASS1_BITS+2) also folds in the level shift to unsigned. */
    tmp10 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp10 <<= CONST_BITS;

    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[4];
    z3 = (INT32) wsptr[6];

    tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132));     /* c2+c4 */
    tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045));     /* c2-c6 */
    z4 = z1 + z3;
    tmp24 = MULTIPLY(z4, - FIX(1.155664402));        /* -(c2-c10) */
    z4 -= z2;
    tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976));  /* c2 */
    tmp21 = tmp20 + tmp23 + tmp25 -
	    MULTIPLY(z2, FIX(1.821790775));          /* c2+c4+c10-c6 */
    tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087)); /* c4+c6 */
    tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477)); /* c6+c8 */
    tmp24 += tmp25;
    tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120));  /* c8+c10 */
    tmp24 += MULTIPLY(z2, FIX(1.944413522)) -        /* c2+c8 */
	     MULTIPLY(z1, FIX(1.390975730));         /* c4+c10 */
    tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562));  /* c0 */

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];

    tmp11 = z1 + z2;
    tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */
    tmp11 = MULTIPLY(tmp11, FIX(0.887983902));           /* c3-c9 */
    tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295));         /* c5-c9 */
    tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */
    tmp10 = tmp11 + tmp12 + tmp13 -
	    MULTIPLY(z1, FIX(0.923107866));              /* c7+c5+c3-c1-2*c9 */
    z1    = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579)); /* c7+c9 */
    tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588));        /* c1+c7+3*c9-c3 */
    tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623));        /* c3+c5-c7-c9 */
    z1    = MULTIPLY(z2 + z4, - FIX(1.798248910));       /* -(c1+c9) */
    tmp11 += z1;
    tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632));        /* c1+c5+c9-c7 */
    tmp14 += MULTIPLY(z2, - FIX(1.467221301)) +          /* -(c5+c9) */
	     MULTIPLY(z3, FIX(1.001388905)) -            /* c1-c9 */
	     MULTIPLY(z4, FIX(1.684843907));             /* c3+c9 */

    /* Final output stage: descale, mask, and clamp through range_limit. */

    outptr[0]  = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[1]  = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[9]  = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[2]  = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[8]  = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[3]  = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[7]  = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[4]  = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[6]  = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[5]  = range_limit[(int) RIGHT_SHIFT(tmp25,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];

    wsptr += 8;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 12x12 output block.
 *
 * Optimized algorithm with 15 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/24).
 *
 * Two-pass structure over an 8x12 workspace: pass 1 expands each of the
 * 8 input columns to 12 intermediate samples (kept at PASS1_BITS extra
 * precision); pass 2 transforms the 12 rows, descales, and range-limits
 * into output_buf at output_col.  Several constants here (FIX_0_541196100,
 * FIX_0_765366865, FIX_1_847759065) are the precomputed file-level values
 * shared with the 8x8 kernel, since c9 of a 24-point DCT equals c6 of an
 * 8-point one.
 *
 * cinfo      - decompression object; supplies the sample range-limit table.
 * compptr    - component whose dct_table holds the dequantization multipliers.
 * coef_block - 8x8 block of quantized DCT coefficients.
 * output_buf/output_col - destination sample rows and starting column.
 */

GLOBAL(void)
jpeg_idct_12x12 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		 JCOEFPTR coef_block,
		 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*12];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    /* Biases the later right shift so truncation rounds to nearest. */
    z3 += ONE << (CONST_BITS-PASS1_BITS-1);

    z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z4 = MULTIPLY(z4, FIX(1.224744871));         /* c4 */

    tmp10 = z3 + z4;
    tmp11 = z3 - z4;

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z4 = MULTIPLY(z1, FIX(1.366025404));         /* c2 */
    z1 <<= CONST_BITS;
    z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    z2 <<= CONST_BITS;

    tmp12 = z1 - z2;

    tmp21 = z3 + tmp12;
    tmp24 = z3 - tmp12;

    tmp12 = z4 + z2;

    tmp20 = tmp10 + tmp12;
    tmp25 = tmp10 - tmp12;

    tmp12 = z4 - z1 - z2;

    tmp22 = tmp11 + tmp12;
    tmp23 = tmp11 - tmp12;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    tmp11 = MULTIPLY(z2, FIX(1.306562965));      /* c3 */
    tmp14 = MULTIPLY(z2, - FIX_0_541196100);     /* -c9 */

    tmp10 = z1 + z3;
    tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
    tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
    tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
    tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */
    tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
    tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
    tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
	     MULTIPLY(z4, FIX(1.982889723));          /* c5+c7 */

    z1 -= z4;
    z2 -= z3;
    z3 = MULTIPLY(z1 + z2, FIX_0_541196100);     /* c9 */
    tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865);  /* c3-c9 */
    tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065);  /* c3+c9 */

    /* Final output stage: butterflies pair sample k with sample 11-k. */

    wsptr[8*0]  = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*11] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*1]  = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*10] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*2]  = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*9]  = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*3]  = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*8]  = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*4]  = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*7]  = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*5]  = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
    wsptr[8*6]  = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 12 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 12; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    /* The +2^(PASS1_BITS+2) also folds in the level shift to unsigned. */
    z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    z3 <<= CONST_BITS;

    z4 = (INT32) wsptr[4];
    z4 = MULTIPLY(z4, FIX(1.224744871));         /* c4 */

    tmp10 = z3 + z4;
    tmp11 = z3 - z4;

    z1 = (INT32) wsptr[2];
    z4 = MULTIPLY(z1, FIX(1.366025404));         /* c2 */
    z1 <<= CONST_BITS;
    z2 = (INT32) wsptr[6];
    z2 <<= CONST_BITS;

    tmp12 = z1 - z2;

    tmp21 = z3 + tmp12;
    tmp24 = z3 - tmp12;

    tmp12 = z4 + z2;

    tmp20 = tmp10 + tmp12;
    tmp25 = tmp10 - tmp12;

    tmp12 = z4 - z1 - z2;

    tmp22 = tmp11 + tmp12;
    tmp23 = tmp11 - tmp12;

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];

    tmp11 = MULTIPLY(z2, FIX(1.306562965));      /* c3 */
    tmp14 = MULTIPLY(z2, - FIX_0_541196100);     /* -c9 */

    tmp10 = z1 + z3;
    tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
    tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
    tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
    tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */
    tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
    tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
    tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
	     MULTIPLY(z4, FIX(1.982889723));          /* c5+c7 */

    z1 -= z4;
    z2 -= z3;
    z3 = MULTIPLY(z1 + z2, FIX_0_541196100);     /* c9 */
    tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865);  /* c3-c9 */
    tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065);  /* c3+c9 */

    /* Final output stage: descale, mask, and clamp through range_limit. */

    outptr[0]  = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[1]  = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[2]  = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[9]  = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[3]  = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[8]  = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[4]  = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[7]  = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[5]  = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[6]  = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];

    wsptr += 8;		/* advance pointer to next row */
  }
}
  1532. /*
  1533. * Perform dequantization and inverse DCT on one block of coefficients,
  1534. * producing a 13x13 output block.
  1535. *
  1536. * Optimized algorithm with 29 multiplications in the 1-D kernel.
  1537. * cK represents sqrt(2) * cos(K*pi/26).
  1538. */
  1539. GLOBAL(void)
  1540. jpeg_idct_13x13 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1541. JCOEFPTR coef_block,
  1542. JSAMPARRAY output_buf, JDIMENSION output_col)
  1543. {
  1544. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  1545. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  1546. INT32 z1, z2, z3, z4;
  1547. JCOEFPTR inptr;
  1548. ISLOW_MULT_TYPE * quantptr;
  1549. int * wsptr;
  1550. JSAMPROW outptr;
  1551. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1552. int ctr;
  1553. int workspace[8*13]; /* buffers data between passes */
  1554. SHIFT_TEMPS
  1555. /* Pass 1: process columns from input, store into work array. */
  1556. inptr = coef_block;
  1557. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1558. wsptr = workspace;
  1559. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1560. /* Even part */
  1561. z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1562. z1 <<= CONST_BITS;
  1563. /* Add fudge factor here for final descale. */
  1564. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  1565. z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1566. z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1567. z4 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1568. tmp10 = z3 + z4;
  1569. tmp11 = z3 - z4;
  1570. tmp12 = MULTIPLY(tmp10, FIX(1.155388986)); /* (c4+c6)/2 */
  1571. tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1; /* (c4-c6)/2 */
  1572. tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */
  1573. tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */
  1574. tmp12 = MULTIPLY(tmp10, FIX(0.316450131)); /* (c8-c12)/2 */
  1575. tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1; /* (c8+c12)/2 */
  1576. tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */
  1577. tmp25 = MULTIPLY(z2, - FIX(1.252223920)) + tmp12 + tmp13; /* c4 */
  1578. tmp12 = MULTIPLY(tmp10, FIX(0.435816023)); /* (c2-c10)/2 */
  1579. tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1; /* (c2+c10)/2 */
  1580. tmp23 = MULTIPLY(z2, - FIX(0.170464608)) - tmp12 - tmp13; /* c12 */
  1581. tmp24 = MULTIPLY(z2, - FIX(0.803364869)) + tmp12 - tmp13; /* c8 */
  1582. tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */
  1583. /* Odd part */
  1584. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1585. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1586. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1587. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1588. tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651)); /* c3 */
  1589. tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945)); /* c5 */
  1590. tmp15 = z1 + z4;
  1591. tmp13 = MULTIPLY(tmp15, FIX(0.937797057)); /* c7 */
  1592. tmp10 = tmp11 + tmp12 + tmp13 -
  1593. MULTIPLY(z1, FIX(2.020082300)); /* c7+c5+c3-c1 */
  1594. tmp14 = MULTIPLY(z2 + z3, - FIX(0.338443458)); /* -c11 */
  1595. tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564)); /* c5+c9+c11-c3 */
  1596. tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027)); /* c1+c5-c9-c11 */
  1597. tmp14 = MULTIPLY(z2 + z4, - FIX(1.163874945)); /* -c5 */
  1598. tmp11 += tmp14;
  1599. tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352)); /* c3+c5+c9-c7 */
  1600. tmp14 = MULTIPLY(z3 + z4, - FIX(0.657217813)); /* -c9 */
  1601. tmp12 += tmp14;
  1602. tmp13 += tmp14;
  1603. tmp15 = MULTIPLY(tmp15, FIX(0.338443458)); /* c11 */
  1604. tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) - /* c9-c11 */
  1605. MULTIPLY(z2, FIX(0.466105296)); /* c1-c7 */
  1606. z1 = MULTIPLY(z3 - z2, FIX(0.937797057)); /* c7 */
  1607. tmp14 += z1;
  1608. tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) - /* c3-c7 */
  1609. MULTIPLY(z4, FIX(1.742345811)); /* c1+c11 */
  1610. /* Final output stage */
  1611. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1612. wsptr[8*12] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1613. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1614. wsptr[8*11] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1615. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1616. wsptr[8*10] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1617. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
  1618. wsptr[8*9] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
  1619. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1620. wsptr[8*8] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1621. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  1622. wsptr[8*7] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  1623. wsptr[8*6] = (int) RIGHT_SHIFT(tmp26, CONST_BITS-PASS1_BITS);
  1624. }
  1625. /* Pass 2: process 13 rows from work array, store into output array. */
  1626. wsptr = workspace;
  1627. for (ctr = 0; ctr < 13; ctr++) {
  1628. outptr = output_buf[ctr] + output_col;
  1629. /* Even part */
  1630. /* Add fudge factor here for final descale. */
  1631. z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1632. z1 <<= CONST_BITS;
  1633. z2 = (INT32) wsptr[2];
  1634. z3 = (INT32) wsptr[4];
  1635. z4 = (INT32) wsptr[6];
  1636. tmp10 = z3 + z4;
  1637. tmp11 = z3 - z4;
  1638. tmp12 = MULTIPLY(tmp10, FIX(1.155388986)); /* (c4+c6)/2 */
  1639. tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1; /* (c4-c6)/2 */
  1640. tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */
  1641. tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */
  1642. tmp12 = MULTIPLY(tmp10, FIX(0.316450131)); /* (c8-c12)/2 */
  1643. tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1; /* (c8+c12)/2 */
  1644. tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */
  1645. tmp25 = MULTIPLY(z2, - FIX(1.252223920)) + tmp12 + tmp13; /* c4 */
  1646. tmp12 = MULTIPLY(tmp10, FIX(0.435816023)); /* (c2-c10)/2 */
  1647. tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1; /* (c2+c10)/2 */
  1648. tmp23 = MULTIPLY(z2, - FIX(0.170464608)) - tmp12 - tmp13; /* c12 */
  1649. tmp24 = MULTIPLY(z2, - FIX(0.803364869)) + tmp12 - tmp13; /* c8 */
  1650. tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */
  1651. /* Odd part */
  1652. z1 = (INT32) wsptr[1];
  1653. z2 = (INT32) wsptr[3];
  1654. z3 = (INT32) wsptr[5];
  1655. z4 = (INT32) wsptr[7];
  1656. tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651)); /* c3 */
  1657. tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945)); /* c5 */
  1658. tmp15 = z1 + z4;
  1659. tmp13 = MULTIPLY(tmp15, FIX(0.937797057)); /* c7 */
  1660. tmp10 = tmp11 + tmp12 + tmp13 -
  1661. MULTIPLY(z1, FIX(2.020082300)); /* c7+c5+c3-c1 */
  1662. tmp14 = MULTIPLY(z2 + z3, - FIX(0.338443458)); /* -c11 */
  1663. tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564)); /* c5+c9+c11-c3 */
  1664. tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027)); /* c1+c5-c9-c11 */
  1665. tmp14 = MULTIPLY(z2 + z4, - FIX(1.163874945)); /* -c5 */
  1666. tmp11 += tmp14;
  1667. tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352)); /* c3+c5+c9-c7 */
  1668. tmp14 = MULTIPLY(z3 + z4, - FIX(0.657217813)); /* -c9 */
  1669. tmp12 += tmp14;
  1670. tmp13 += tmp14;
  1671. tmp15 = MULTIPLY(tmp15, FIX(0.338443458)); /* c11 */
  1672. tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) - /* c9-c11 */
  1673. MULTIPLY(z2, FIX(0.466105296)); /* c1-c7 */
  1674. z1 = MULTIPLY(z3 - z2, FIX(0.937797057)); /* c7 */
  1675. tmp14 += z1;
  1676. tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) - /* c3-c7 */
  1677. MULTIPLY(z4, FIX(1.742345811)); /* c1+c11 */
  1678. /* Final output stage */
  1679. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1680. CONST_BITS+PASS1_BITS+3)
  1681. & RANGE_MASK];
  1682. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1683. CONST_BITS+PASS1_BITS+3)
  1684. & RANGE_MASK];
  1685. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1686. CONST_BITS+PASS1_BITS+3)
  1687. & RANGE_MASK];
  1688. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1689. CONST_BITS+PASS1_BITS+3)
  1690. & RANGE_MASK];
  1691. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1692. CONST_BITS+PASS1_BITS+3)
  1693. & RANGE_MASK];
  1694. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1695. CONST_BITS+PASS1_BITS+3)
  1696. & RANGE_MASK];
  1697. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1698. CONST_BITS+PASS1_BITS+3)
  1699. & RANGE_MASK];
  1700. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1701. CONST_BITS+PASS1_BITS+3)
  1702. & RANGE_MASK];
  1703. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1704. CONST_BITS+PASS1_BITS+3)
  1705. & RANGE_MASK];
  1706. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1707. CONST_BITS+PASS1_BITS+3)
  1708. & RANGE_MASK];
  1709. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  1710. CONST_BITS+PASS1_BITS+3)
  1711. & RANGE_MASK];
  1712. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  1713. CONST_BITS+PASS1_BITS+3)
  1714. & RANGE_MASK];
  1715. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26,
  1716. CONST_BITS+PASS1_BITS+3)
  1717. & RANGE_MASK];
  1718. wsptr += 8; /* advance pointer to next row */
  1719. }
  1720. }
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 14x14 output block.
 *
 * Optimized algorithm with 20 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/28).
 */

GLOBAL(void)
jpeg_idct_14x14 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		 JCOEFPTR coef_block,
		 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*14];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * The input block is always DCTSIZE x DCTSIZE (8x8) coefficients; each of
   * the 8 columns is expanded to 14 samples by the 14-point 1-D kernel.
   * Intermediate results stay scaled up by 2**PASS1_BITS for pass-2 accuracy.
   */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z1 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS-PASS1_BITS-1);
    z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z2 = MULTIPLY(z4, FIX(1.274162392));         /* c4 */
    z3 = MULTIPLY(z4, FIX(0.314692123));         /* c12 */
    z4 = MULTIPLY(z4, FIX(0.881747734));         /* c8 */

    tmp10 = z1 + z2;
    tmp11 = z1 + z3;
    tmp12 = z1 - z4;

    /* tmp23 involves no odd-part term below, so it can be descaled to
     * workspace precision immediately.
     */
    tmp23 = RIGHT_SHIFT(z1 - ((z2 + z3 - z4) << 1), /* c0 = (c4+c12-c8)*2 */
			CONST_BITS-PASS1_BITS);

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    z3 = MULTIPLY(z1 + z2, FIX(1.105676686));    /* c6 */

    tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
    tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
    tmp15 = MULTIPLY(z1, FIX(0.613604268)) -     /* c10 */
	    MULTIPLY(z2, FIX(1.378756276));      /* c2 */

    tmp20 = tmp10 + tmp13;
    tmp26 = tmp10 - tmp13;
    tmp21 = tmp11 + tmp14;
    tmp25 = tmp11 - tmp14;
    tmp22 = tmp12 + tmp15;
    tmp24 = tmp12 - tmp15;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp13 = z4 << CONST_BITS;	/* z4 used unscaled by a factor of 1 below */

    tmp14 = z1 + z3;
    tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
    tmp12 = MULTIPLY(tmp14, FIX(1.197448846));   /* c5 */
    tmp10 = tmp11 + tmp12 + tmp13 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
    tmp14 = MULTIPLY(tmp14, FIX(0.752406978));   /* c9 */
    tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
    /* NOTE: z1 and z4 are reused as running combinations from here on;
     * the statement order is load-bearing.
     */
    z1    -= z2;
    tmp15 = MULTIPLY(z1, FIX(0.467085129)) - tmp13; /* c11 */
    tmp16 += tmp15;
    z1    += z4;
    z4    = MULTIPLY(z2 + z3, - FIX(0.158341681)) - tmp13; /* -c13 */
    tmp11 += z4 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
    tmp12 += z4 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
    z4    = MULTIPLY(z3 - z2, FIX(1.405321284));  /* c1 */
    tmp14 += z4 + tmp13 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
    tmp15 += z4 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */

    /* (z1 - z3) = (z[1]-z[3]+z[5]-z[7]) needs no multiply; scale straight
     * to workspace precision.
     */
    tmp13 = (z1 - z3) << PASS1_BITS;

    /* Final output stage: butterflies on even/odd sums, descaled to
     * workspace precision (sign of odd term flips across the midpoint).
     */

    wsptr[8*0]  = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*13] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*1]  = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*12] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*2]  = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*11] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*3]  = (int) (tmp23 + tmp13);	/* both already at PASS1 scale */
    wsptr[8*10] = (int) (tmp23 - tmp13);
    wsptr[8*4]  = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*9]  = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*5]  = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
    wsptr[8*8]  = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
    wsptr[8*6]  = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS);
    wsptr[8*7]  = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 14 rows from work array, store into output array.
   * Same 14-point kernel; the final descale removes both scalings plus
   * 3 bits for the sqrt(8)*sqrt(8) IDCT normalization, and range_limit
   * clamps/level-shifts to the sample range.
   */

  wsptr = workspace;
  for (ctr = 0; ctr < 14; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    z1 <<= CONST_BITS;
    z4 = (INT32) wsptr[4];
    z2 = MULTIPLY(z4, FIX(1.274162392));         /* c4 */
    z3 = MULTIPLY(z4, FIX(0.314692123));         /* c12 */
    z4 = MULTIPLY(z4, FIX(0.881747734));         /* c8 */

    tmp10 = z1 + z2;
    tmp11 = z1 + z3;
    tmp12 = z1 - z4;
    tmp23 = z1 - ((z2 + z3 - z4) << 1);          /* c0 = (c4+c12-c8)*2 */

    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[6];
    z3 = MULTIPLY(z1 + z2, FIX(1.105676686));    /* c6 */

    tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
    tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
    tmp15 = MULTIPLY(z1, FIX(0.613604268)) -     /* c10 */
	    MULTIPLY(z2, FIX(1.378756276));      /* c2 */

    tmp20 = tmp10 + tmp13;
    tmp26 = tmp10 - tmp13;
    tmp21 = tmp11 + tmp14;
    tmp25 = tmp11 - tmp14;
    tmp22 = tmp12 + tmp15;
    tmp24 = tmp12 - tmp15;

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];
    z4 <<= CONST_BITS;

    tmp14 = z1 + z3;
    tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
    tmp12 = MULTIPLY(tmp14, FIX(1.197448846));   /* c5 */
    tmp10 = tmp11 + tmp12 + z4 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
    tmp14 = MULTIPLY(tmp14, FIX(0.752406978));   /* c9 */
    tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
    z1    -= z2;
    tmp15 = MULTIPLY(z1, FIX(0.467085129)) - z4; /* c11 */
    tmp16 += tmp15;
    tmp13 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - z4; /* -c13 */
    tmp11 += tmp13 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
    tmp12 += tmp13 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
    tmp13 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */
    tmp14 += tmp13 + z4 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
    tmp15 += tmp13 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
    tmp13 = ((z1 - z3) << CONST_BITS) + z4;

    /* Final output stage */

    outptr[0]  = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[1]  = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[2]  = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[3]  = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[4]  = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[9]  = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[5]  = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[8]  = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[6]  = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[7]  = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];

    wsptr += 8;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 15x15 output block.
 *
 * Optimized algorithm with 22 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/30).
 */

GLOBAL(void)
jpeg_idct_15x15 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		 JCOEFPTR coef_block,
		 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*15];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Each of the 8 input columns is expanded to 15 samples by the 15-point
   * 1-D kernel; results stay scaled up by 2**PASS1_BITS.
   * Odd length => the center output (row 7) has no odd-part contribution.
   */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z1 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS-PASS1_BITS-1);

    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    tmp10 = MULTIPLY(z4, FIX(0.437016024));      /* c12 */
    tmp11 = MULTIPLY(z4, FIX(1.144122806));      /* c6 */

    tmp12 = z1 - tmp10;
    tmp13 = z1 + tmp11;
    z1 -= (tmp11 - tmp10) << 1;                  /* c0 = (c6-c12)*2 */

    /* Sum/difference of z2,z3 lets each pair of cosines be formed with
     * one half-sum and one half-difference multiply.
     */
    z4 = z2 - z3;
    z3 += z2;
    tmp10 = MULTIPLY(z3, FIX(1.337628990));      /* (c2+c4)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.045680613));      /* (c2-c4)/2 */
    z2 = MULTIPLY(z2, FIX(1.439773946));         /* c4+c14 */

    tmp20 = tmp13 + tmp10 + tmp11;
    tmp23 = tmp12 - tmp10 + tmp11 + z2;

    tmp10 = MULTIPLY(z3, FIX(0.547059574));      /* (c8+c14)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.399234004));      /* (c8-c14)/2 */

    tmp25 = tmp13 - tmp10 - tmp11;
    tmp26 = tmp12 + tmp10 - tmp11 - z2;

    tmp10 = MULTIPLY(z3, FIX(0.790569415));      /* (c6+c12)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.353553391));      /* (c6-c12)/2 */

    tmp21 = tmp12 + tmp10 + tmp11;
    tmp24 = tmp13 - tmp10 + tmp11;

    tmp11 += tmp11;	/* now tmp11 = (c6-c12)*z4 */
    tmp22 = z1 + tmp11;                          /* c10 = c6-c12 */
    tmp27 = z1 - tmp11 - tmp11;                  /* c0 = (c6-c12)*2 */

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z3 = MULTIPLY(z4, FIX(1.224744871));         /* c5 */
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    tmp13 = z2 - z4;
    tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876)); /* c9 */
    tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148)); /* c3-c9 */
    tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */

    tmp13 = MULTIPLY(z2, - FIX(0.831253876));    /* -c9 */
    tmp15 = MULTIPLY(z2, - FIX(1.344997024));    /* -c3 */
    z2 = z1 - z4;
    tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */

    tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */
    tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */
    tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */
    z2 = MULTIPLY(z1 + z4, FIX(0.575212477));    /* c11 */
    tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */
    tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */

    /* Final output stage: rows 0..6 pair with rows 14..8; row 7 is the
     * center sample (even part only).
     */

    wsptr[8*0]  = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*14] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*1]  = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*13] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*2]  = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*12] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*3]  = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*11] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*4]  = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*10] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*5]  = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
    wsptr[8*9]  = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
    wsptr[8*6]  = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS);
    wsptr[8*8]  = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS);
    wsptr[8*7]  = (int) RIGHT_SHIFT(tmp27, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 15 rows from work array, store into output array.
   * Same 15-point kernel; final descale folds in CONST_BITS, PASS1_BITS
   * and 3 bits of IDCT normalization, then range_limit clamps to the
   * legal sample range.
   */

  wsptr = workspace;
  for (ctr = 0; ctr < 15; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    z1 <<= CONST_BITS;

    z2 = (INT32) wsptr[2];
    z3 = (INT32) wsptr[4];
    z4 = (INT32) wsptr[6];

    tmp10 = MULTIPLY(z4, FIX(0.437016024));      /* c12 */
    tmp11 = MULTIPLY(z4, FIX(1.144122806));      /* c6 */

    tmp12 = z1 - tmp10;
    tmp13 = z1 + tmp11;
    z1 -= (tmp11 - tmp10) << 1;                  /* c0 = (c6-c12)*2 */

    z4 = z2 - z3;
    z3 += z2;
    tmp10 = MULTIPLY(z3, FIX(1.337628990));      /* (c2+c4)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.045680613));      /* (c2-c4)/2 */
    z2 = MULTIPLY(z2, FIX(1.439773946));         /* c4+c14 */

    tmp20 = tmp13 + tmp10 + tmp11;
    tmp23 = tmp12 - tmp10 + tmp11 + z2;

    tmp10 = MULTIPLY(z3, FIX(0.547059574));      /* (c8+c14)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.399234004));      /* (c8-c14)/2 */

    tmp25 = tmp13 - tmp10 - tmp11;
    tmp26 = tmp12 + tmp10 - tmp11 - z2;

    tmp10 = MULTIPLY(z3, FIX(0.790569415));      /* (c6+c12)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.353553391));      /* (c6-c12)/2 */

    tmp21 = tmp12 + tmp10 + tmp11;
    tmp24 = tmp13 - tmp10 + tmp11;

    tmp11 += tmp11;
    tmp22 = z1 + tmp11;                          /* c10 = c6-c12 */
    tmp27 = z1 - tmp11 - tmp11;                  /* c0 = (c6-c12)*2 */

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z4 = (INT32) wsptr[5];
    z3 = MULTIPLY(z4, FIX(1.224744871));         /* c5 */
    z4 = (INT32) wsptr[7];

    tmp13 = z2 - z4;
    tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876)); /* c9 */
    tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148)); /* c3-c9 */
    tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */

    tmp13 = MULTIPLY(z2, - FIX(0.831253876));    /* -c9 */
    tmp15 = MULTIPLY(z2, - FIX(1.344997024));    /* -c3 */
    z2 = z1 - z4;
    tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */

    tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */
    tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */
    tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */
    z2 = MULTIPLY(z1 + z4, FIX(0.575212477));    /* c11 */
    tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */
    tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */

    /* Final output stage */

    outptr[0]  = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[1]  = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[2]  = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[3]  = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[4]  = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[5]  = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[9]  = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[6]  = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[8]  = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[7]  = range_limit[(int) RIGHT_SHIFT(tmp27,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];

    wsptr += 8;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 16x16 output block.
 *
 * Optimized algorithm with 28 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/32).
 */

GLOBAL(void)
jpeg_idct_16x16 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		 JCOEFPTR coef_block,
		 JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*16];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Each of the 8 input columns is expanded to 16 samples by the 16-point
   * 1-D kernel; results stay scaled up by 2**PASS1_BITS.
   * The even-indexed 16-point cosines equal the 8-point cosines, hence the
   * cK[16]=cJ[8] comments and the reuse of FIX_* constants below.
   */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 <<= CONST_BITS;
    /* Add fudge factor here for final descale.
     * NOTE(review): sibling kernels write ONE << ... here; plain 1 is safe
     * only because the shift count (CONST_BITS-PASS1_BITS-1) fits in int.
     */
    tmp0 += 1 << (CONST_BITS-PASS1_BITS-1);

    z1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    tmp1 = MULTIPLY(z1, FIX(1.306562965));      /* c4[16] = c2[8] */
    tmp2 = MULTIPLY(z1, FIX_0_541196100);       /* c12[16] = c6[8] */

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    z3 = z1 - z2;
    z4 = MULTIPLY(z3, FIX(0.275899379));        /* c14[16] = c7[8] */
    z3 = MULTIPLY(z3, FIX(1.387039845));        /* c2[16] = c1[8] */

    tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447);  /* (c6+c2)[16] = (c3+c1)[8] */
    tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223);  /* (c6-c14)[16] = (c3-c7)[8] */
    tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
    tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */

    tmp20 = tmp10 + tmp0;
    tmp27 = tmp10 - tmp0;
    tmp21 = tmp12 + tmp1;
    tmp26 = tmp12 - tmp1;
    tmp22 = tmp13 + tmp2;
    tmp25 = tmp13 - tmp2;
    tmp23 = tmp11 + tmp3;
    tmp24 = tmp11 - tmp3;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    tmp11 = z1 + z3;

    tmp1  = MULTIPLY(z1 + z2, FIX(1.353318001));   /* c3 */
    tmp2  = MULTIPLY(tmp11,   FIX(1.247225013));   /* c5 */
    tmp3  = MULTIPLY(z1 + z4, FIX(1.093201867));   /* c7 */
    tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586));   /* c9 */
    tmp11 = MULTIPLY(tmp11,   FIX(0.666655658));   /* c11 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528));   /* c13 */
    tmp0  = tmp1 + tmp2 + tmp3 -
	    MULTIPLY(z1, FIX(2.286341144));        /* c7+c5+c3-c1 */
    tmp13 = tmp10 + tmp11 + tmp12 -
	    MULTIPLY(z1, FIX(1.835730603));        /* c9+c11+c13-c15 */
    /* z1/z2 below become shared correction terms, not coefficients. */
    z1    = MULTIPLY(z2 + z3, FIX(0.138617169));   /* c15 */
    tmp1  += z1 + MULTIPLY(z2, FIX(0.071888074));  /* c9+c11-c3-c15 */
    tmp2  += z1 - MULTIPLY(z3, FIX(1.125726048));  /* c5+c7+c15-c3 */
    z1    = MULTIPLY(z3 - z2, FIX(1.407403738));   /* c1 */
    tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282));  /* c1+c11-c9-c13 */
    tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411));  /* c1+c5+c13-c7 */
    z2    += z4;
    z1    = MULTIPLY(z2, - FIX(0.666655658));      /* -c11 */
    tmp1  += z1;
    tmp3  += z1 + MULTIPLY(z4, FIX(1.065388962));  /* c3+c11+c15-c7 */
    z2    = MULTIPLY(z2, - FIX(1.247225013));      /* -c5 */
    tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809));  /* c1+c5+c9-c13 */
    tmp12 += z2;
    z2    = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
    tmp2  += z2;
    tmp3  += z2;
    z2    = MULTIPLY(z4 - z3, FIX(0.410524528));   /* c13 */
    tmp10 += z2;
    tmp11 += z2;

    /* Final output stage: rows 0..7 pair with rows 15..8. */

    wsptr[8*0]  = (int) RIGHT_SHIFT(tmp20 + tmp0,  CONST_BITS-PASS1_BITS);
    wsptr[8*15] = (int) RIGHT_SHIFT(tmp20 - tmp0,  CONST_BITS-PASS1_BITS);
    wsptr[8*1]  = (int) RIGHT_SHIFT(tmp21 + tmp1,  CONST_BITS-PASS1_BITS);
    wsptr[8*14] = (int) RIGHT_SHIFT(tmp21 - tmp1,  CONST_BITS-PASS1_BITS);
    wsptr[8*2]  = (int) RIGHT_SHIFT(tmp22 + tmp2,  CONST_BITS-PASS1_BITS);
    wsptr[8*13] = (int) RIGHT_SHIFT(tmp22 - tmp2,  CONST_BITS-PASS1_BITS);
    wsptr[8*3]  = (int) RIGHT_SHIFT(tmp23 + tmp3,  CONST_BITS-PASS1_BITS);
    wsptr[8*12] = (int) RIGHT_SHIFT(tmp23 - tmp3,  CONST_BITS-PASS1_BITS);
    wsptr[8*4]  = (int) RIGHT_SHIFT(tmp24 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*11] = (int) RIGHT_SHIFT(tmp24 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*5]  = (int) RIGHT_SHIFT(tmp25 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*10] = (int) RIGHT_SHIFT(tmp25 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*6]  = (int) RIGHT_SHIFT(tmp26 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*9]  = (int) RIGHT_SHIFT(tmp26 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*7]  = (int) RIGHT_SHIFT(tmp27 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*8]  = (int) RIGHT_SHIFT(tmp27 - tmp13, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 16 rows from work array, store into output array.
   * Same 16-point kernel; final descale folds in CONST_BITS, PASS1_BITS
   * and 3 bits of IDCT normalization, then range_limit clamps to the
   * legal sample range.
   */

  wsptr = workspace;
  for (ctr = 0; ctr < 16; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 <<= CONST_BITS;

    z1 = (INT32) wsptr[4];
    tmp1 = MULTIPLY(z1, FIX(1.306562965));      /* c4[16] = c2[8] */
    tmp2 = MULTIPLY(z1, FIX_0_541196100);       /* c12[16] = c6[8] */

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;

    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[6];
    z3 = z1 - z2;
    z4 = MULTIPLY(z3, FIX(0.275899379));        /* c14[16] = c7[8] */
    z3 = MULTIPLY(z3, FIX(1.387039845));        /* c2[16] = c1[8] */

    tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447);  /* (c6+c2)[16] = (c3+c1)[8] */
    tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223);  /* (c6-c14)[16] = (c3-c7)[8] */
    tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
    tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */

    tmp20 = tmp10 + tmp0;
    tmp27 = tmp10 - tmp0;
    tmp21 = tmp12 + tmp1;
    tmp26 = tmp12 - tmp1;
    tmp22 = tmp13 + tmp2;
    tmp25 = tmp13 - tmp2;
    tmp23 = tmp11 + tmp3;
    tmp24 = tmp11 - tmp3;

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];

    tmp11 = z1 + z3;

    tmp1  = MULTIPLY(z1 + z2, FIX(1.353318001));   /* c3 */
    tmp2  = MULTIPLY(tmp11,   FIX(1.247225013));   /* c5 */
    tmp3  = MULTIPLY(z1 + z4, FIX(1.093201867));   /* c7 */
    tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586));   /* c9 */
    tmp11 = MULTIPLY(tmp11,   FIX(0.666655658));   /* c11 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528));   /* c13 */
    tmp0  = tmp1 + tmp2 + tmp3 -
	    MULTIPLY(z1, FIX(2.286341144));        /* c7+c5+c3-c1 */
    tmp13 = tmp10 + tmp11 + tmp12 -
	    MULTIPLY(z1, FIX(1.835730603));        /* c9+c11+c13-c15 */
    z1    = MULTIPLY(z2 + z3, FIX(0.138617169));   /* c15 */
    tmp1  += z1 + MULTIPLY(z2, FIX(0.071888074));  /* c9+c11-c3-c15 */
    tmp2  += z1 - MULTIPLY(z3, FIX(1.125726048));  /* c5+c7+c15-c3 */
    z1    = MULTIPLY(z3 - z2, FIX(1.407403738));   /* c1 */
    tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282));  /* c1+c11-c9-c13 */
    tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411));  /* c1+c5+c13-c7 */
    z2    += z4;
    z1    = MULTIPLY(z2, - FIX(0.666655658));      /* -c11 */
    tmp1  += z1;
    tmp3  += z1 + MULTIPLY(z4, FIX(1.065388962));  /* c3+c11+c15-c7 */
    z2    = MULTIPLY(z2, - FIX(1.247225013));      /* -c5 */
    tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809));  /* c1+c5+c9-c13 */
    tmp12 += z2;
    z2    = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
    tmp2  += z2;
    tmp3  += z2;
    z2    = MULTIPLY(z4 - z3, FIX(0.410524528));   /* c13 */
    tmp10 += z2;
    tmp11 += z2;

    /* Final output stage */

    outptr[0]  = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp0,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[15] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp0,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[1]  = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp1,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp1,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[2]  = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp2,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp2,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[3]  = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp3,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp3,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[4]  = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[5]  = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[6]  = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[9]  = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[7]  = range_limit[(int) RIGHT_SHIFT(tmp27 + tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[8]  = range_limit[(int) RIGHT_SHIFT(tmp27 - tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];

    wsptr += 8;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 16x8 output block.
 *
 * 8-point IDCT in pass 1 (columns), 16-point in pass 2 (rows).
 *
 * All arithmetic is scaled fixed-point: CONST_BITS fraction bits during
 * computation, with intermediate pass-1 results kept at PASS1_BITS extra
 * precision in the workspace.  Rounding is handled by folding a half-LSB
 * "fudge factor" into the DC term before each descale.
 */

GLOBAL(void)
jpeg_idct_16x8 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		JCOEFPTR coef_block,
		JSAMPARRAY output_buf, JDIMENSION output_col)
{
  /* tmp0..tmp13: per-pass butterfly terms; tmp20..tmp27: the eight
   * even/odd combination sums of the 16-point row pass; z1..z4: rotator
   * inputs (freely reused as scratch once consumed).
   */
  INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*8];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Note results are scaled up by sqrt(8) compared to a true IDCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 8-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = DCTSIZE; ctr > 0; ctr--) {
    /* Due to quantization, we will usually find that many of the input
     * coefficients are zero, especially the AC terms.  We can exploit this
     * by short-circuiting the IDCT calculation for any column in which all
     * the AC terms are zero.  In that case each output is equal to the
     * DC coefficient (with scale factor as needed).
     * With typical images and quantization tables, half or more of the
     * column DCT calculations can be simplified this way.
     */
    if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
	inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 &&
	inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 &&
	inptr[DCTSIZE*7] == 0) {
      /* AC terms all zero: the column IDCT degenerates to a constant. */
      int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS;

      wsptr[DCTSIZE*0] = dcval;
      wsptr[DCTSIZE*1] = dcval;
      wsptr[DCTSIZE*2] = dcval;
      wsptr[DCTSIZE*3] = dcval;
      wsptr[DCTSIZE*4] = dcval;
      wsptr[DCTSIZE*5] = dcval;
      wsptr[DCTSIZE*6] = dcval;
      wsptr[DCTSIZE*7] = dcval;

      inptr++;			/* advance pointers to next column */
      quantptr++;
      wsptr++;
      continue;
    }

    /* Even part: reverse the even part of the forward DCT.
     * The rotator is c(-6).
     */
    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);       /* c6 */
    tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865);     /* c2-c6 */
    tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065);     /* c2+c6 */

    z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z2 <<= CONST_BITS;
    z3 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    z2 += ONE << (CONST_BITS-PASS1_BITS-1);

    tmp0 = z2 + z3;
    tmp1 = z2 - z3;

    tmp10 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;
    tmp11 = tmp1 + tmp3;
    tmp12 = tmp1 - tmp3;

    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);

    z2 = tmp0 + tmp2;
    z3 = tmp1 + tmp3;

    z1 = MULTIPLY(z2 + z3, FIX_1_175875602);       /*  c3 */
    z2 = MULTIPLY(z2, - FIX_1_961570560);          /* -c3-c5 */
    z3 = MULTIPLY(z3, - FIX_0_390180644);          /* -c3+c5 */
    z2 += z1;
    z3 += z1;

    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
    tmp0 = MULTIPLY(tmp0, FIX_0_298631336);        /* -c1+c3+c5-c7 */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110);        /*  c1+c3-c5-c7 */
    tmp0 += z1 + z2;
    tmp3 += z1 + z3;

    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869);        /*  c1+c3-c5+c7 */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026);        /*  c1+c3+c5-c7 */
    tmp1 += z1 + z3;
    tmp2 += z1 + z2;

    /* Final output stage: descale down to PASS1_BITS extra precision. */
    wsptr[DCTSIZE*0] = (int) RIGHT_SHIFT(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*7] = (int) RIGHT_SHIFT(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*1] = (int) RIGHT_SHIFT(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*6] = (int) RIGHT_SHIFT(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*2] = (int) RIGHT_SHIFT(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*5] = (int) RIGHT_SHIFT(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*3] = (int) RIGHT_SHIFT(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[DCTSIZE*4] = (int) RIGHT_SHIFT(tmp13 - tmp0, CONST_BITS-PASS1_BITS);

    inptr++;			/* advance pointers to next column */
    quantptr++;
    wsptr++;
  }

  /* Pass 2: process 8 rows from work array, store into output array.
   * 16-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/32).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 <<= CONST_BITS;

    z1 = (INT32) wsptr[4];
    tmp1 = MULTIPLY(z1, FIX(1.306562965));         /* c4[16] = c2[8] */
    tmp2 = MULTIPLY(z1, FIX_0_541196100);          /* c12[16] = c6[8] */

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;

    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[6];
    z3 = z1 - z2;
    z4 = MULTIPLY(z3, FIX(0.275899379));           /* c14[16] = c7[8] */
    z3 = MULTIPLY(z3, FIX(1.387039845));           /* c2[16] = c1[8] */

    tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447);     /* (c6+c2)[16] = (c3+c1)[8] */
    tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223);     /* (c6-c14)[16] = (c3-c7)[8] */
    tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887));    /* (c2-c10)[16] = (c1-c5)[8] */
    tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579));    /* (c10-c14)[16] = (c5-c7)[8] */

    tmp20 = tmp10 + tmp0;
    tmp27 = tmp10 - tmp0;
    tmp21 = tmp12 + tmp1;
    tmp26 = tmp12 - tmp1;
    tmp22 = tmp13 + tmp2;
    tmp25 = tmp13 - tmp2;
    tmp23 = tmp11 + tmp3;
    tmp24 = tmp11 - tmp3;

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];

    tmp11 = z1 + z3;

    tmp1  = MULTIPLY(z1 + z2, FIX(1.353318001));   /* c3 */
    tmp2  = MULTIPLY(tmp11,   FIX(1.247225013));   /* c5 */
    tmp3  = MULTIPLY(z1 + z4, FIX(1.093201867));   /* c7 */
    tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586));   /* c9 */
    tmp11 = MULTIPLY(tmp11,   FIX(0.666655658));   /* c11 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528));   /* c13 */
    tmp0  = tmp1 + tmp2 + tmp3 -
	    MULTIPLY(z1, FIX(2.286341144));        /* c7+c5+c3-c1 */
    tmp13 = tmp10 + tmp11 + tmp12 -
	    MULTIPLY(z1, FIX(1.835730603));        /* c9+c11+c13-c15 */
    z1    = MULTIPLY(z2 + z3, FIX(0.138617169));   /* c15 */
    tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074));   /* c9+c11-c3-c15 */
    tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048));   /* c5+c7+c15-c3 */
    z1    = MULTIPLY(z3 - z2, FIX(1.407403738));   /* c1 */
    tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282));  /* c1+c11-c9-c13 */
    tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411));  /* c1+c5+c13-c7 */
    z2    += z4;
    z1    = MULTIPLY(z2, - FIX(0.666655658));      /* -c11 */
    tmp1  += z1;
    tmp3  += z1 + MULTIPLY(z4, FIX(1.065388962));  /* c3+c11+c15-c7 */
    z2    = MULTIPLY(z2, - FIX(1.247225013));      /* -c5 */
    tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809));  /* c1+c5+c9-c13 */
    tmp12 += z2;
    z2    = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
    tmp2  += z2;
    tmp3  += z2;
    z2    = MULTIPLY(z4 - z3, FIX(0.410524528));   /* c13 */
    tmp10 += z2;
    tmp11 += z2;

    /* Final output stage: descale, then clamp through range_limit.
     * NOTE(review): the "& RANGE_MASK" index trick relies on the extended
     * IJG range-limit table handling out-of-range values -- standard idiom
     * throughout this file.
     */
    outptr[0]  = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp0,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[15] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp0,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[1]  = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp1,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp1,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[2]  = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp2,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp2,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[3]  = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp3,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp3,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[4]  = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[5]  = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[6]  = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[9]  = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[7]  = range_limit[(int) RIGHT_SHIFT(tmp27 + tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[8]  = range_limit[(int) RIGHT_SHIFT(tmp27 - tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];

    wsptr += 8;			/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 14x7 output block.
 *
 * 7-point IDCT in pass 1 (columns), 14-point in pass 2 (rows).
 *
 * Same fixed-point scheme as the other scaled IDCTs in this file:
 * CONST_BITS fraction bits, PASS1_BITS extra precision between passes,
 * rounding folded into the DC term before each descale.
 */

GLOBAL(void)
jpeg_idct_14x7 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		JCOEFPTR coef_block,
		JSAMPARRAY output_buf, JDIMENSION output_col)
{
  /* tmp10..tmp16: odd-part terms; tmp20..tmp26: even-part sums;
   * z1..z4: rotator inputs, reused as scratch once consumed.
   */
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*7];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 7-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/14).
   * Only odd coefficient rows 1,3,5 and even rows 0,2,4,6 exist
   * for a 7-point column.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp23 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp23 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    tmp23 += ONE << (CONST_BITS-PASS1_BITS-1);

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    tmp20 = MULTIPLY(z2 - z3, FIX(0.881747734));     /* c4 */
    tmp22 = MULTIPLY(z1 - z2, FIX(0.314692123));     /* c6 */
    tmp21 = tmp20 + tmp22 + tmp23 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
    tmp10 = z1 + z3;
    z2 -= tmp10;		/* z2 now holds z2 - z1 - z3 for the c0 term */
    tmp10 = MULTIPLY(tmp10, FIX(1.274162392)) + tmp23; /* c2 */
    tmp20 += tmp10 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */
    tmp22 += tmp10 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */
    tmp23 += MULTIPLY(z2, FIX(1.414213562));         /* c0 */

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);

    tmp11 = MULTIPLY(z1 + z2, FIX(0.935414347));     /* (c3+c1-c5)/2 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.170262339));     /* (c3+c5-c1)/2 */
    tmp10 = tmp11 - tmp12;
    tmp11 += tmp12;
    tmp12 = MULTIPLY(z2 + z3, - FIX(1.378756276));   /* -c1 */
    tmp11 += tmp12;
    z2 = MULTIPLY(z1 + z3, FIX(0.613604268));        /* c5 */
    tmp10 += z2;
    tmp12 += z2 + MULTIPLY(z3, FIX(1.870828693));    /* c3+c1-c5 */

    /* Final output stage: 7 workspace rows; row 3 is the symmetric center. */
    wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*6] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*5] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*4] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*3] = (int) RIGHT_SHIFT(tmp23, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 7 rows from work array, store into output array.
   * 14-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/28).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 7; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    z1 <<= CONST_BITS;

    z4 = (INT32) wsptr[4];
    z2 = MULTIPLY(z4, FIX(1.274162392));             /* c4 */
    z3 = MULTIPLY(z4, FIX(0.314692123));             /* c12 */
    z4 = MULTIPLY(z4, FIX(0.881747734));             /* c8 */

    tmp10 = z1 + z2;
    tmp11 = z1 + z3;
    tmp12 = z1 - z4;
    tmp23 = z1 - ((z2 + z3 - z4) << 1);              /* c0 = (c4+c12-c8)*2 */

    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[6];

    z3 = MULTIPLY(z1 + z2, FIX(1.105676686));        /* c6 */

    tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590));     /* c2-c6 */
    tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954));     /* c6+c10 */
    tmp15 = MULTIPLY(z1, FIX(0.613604268)) -         /* c10 */
	    MULTIPLY(z2, FIX(1.378756276));          /* c2 */

    tmp20 = tmp10 + tmp13;
    tmp26 = tmp10 - tmp13;
    tmp21 = tmp11 + tmp14;
    tmp25 = tmp11 - tmp14;
    tmp22 = tmp12 + tmp15;
    tmp24 = tmp12 - tmp15;

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];
    z4 <<= CONST_BITS;		/* scale up to match the multiplied terms */

    tmp14 = z1 + z3;
    tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607));     /* c3 */
    tmp12 = MULTIPLY(tmp14, FIX(1.197448846));       /* c5 */
    tmp10 = tmp11 + tmp12 + z4 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
    tmp14 = MULTIPLY(tmp14, FIX(0.752406978));       /* c9 */
    tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426));  /* c9+c11-c13 */
    z1    -= z2;
    tmp15 = MULTIPLY(z1, FIX(0.467085129)) - z4;     /* c11 */
    tmp16 += tmp15;
    tmp13 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - z4; /* -c13 */
    tmp11 += tmp13 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
    tmp12 += tmp13 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
    tmp13 = MULTIPLY(z3 - z2, FIX(1.405321284));     /* c1 */
    tmp14 += tmp13 + z4 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
    tmp15 += tmp13 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
    tmp13 = ((z1 - z3) << CONST_BITS) + z4;

    /* Final output stage: columns k and 13-k are mirror pairs. */
    outptr[0]  = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[1]  = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[2]  = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[3]  = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[4]  = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[9]  = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[5]  = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[8]  = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[6]  = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[7]  = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];

    wsptr += 8;			/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 12x6 output block.
 *
 * 6-point IDCT in pass 1 (columns), 12-point in pass 2 (rows).
 *
 * Same fixed-point scheme as the other scaled IDCTs in this file.
 */

GLOBAL(void)
jpeg_idct_12x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		JCOEFPTR coef_block,
		JSAMPARRAY output_buf, JDIMENSION output_col)
{
  /* tmp10..tmp15: butterfly terms; tmp20..tmp25: the six row sums;
   * z1..z4: rotator inputs, reused as scratch once consumed.
   */
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*6];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp10 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    tmp10 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp12 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    tmp20 = MULTIPLY(tmp12, FIX(0.707106781));       /* c4 */
    tmp11 = tmp10 + tmp20;
    /* tmp21 is descaled here, so workspace rows 1 and 4 below
     * need no further right shift.
     */
    tmp21 = RIGHT_SHIFT(tmp10 - tmp20 - tmp20, CONST_BITS-PASS1_BITS);
    tmp20 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp10 = MULTIPLY(tmp20, FIX(1.224744871));       /* c2 */
    tmp20 = tmp11 + tmp10;
    tmp22 = tmp11 - tmp10;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp11 = MULTIPLY(z1 + z3, FIX(0.366025404));     /* c5 */
    tmp10 = tmp11 + ((z1 + z2) << CONST_BITS);
    tmp12 = tmp11 + ((z3 - z2) << CONST_BITS);
    /* This term is already at PASS1_BITS scale; no descale needed. */
    tmp11 = (z1 - z2 - z3) << PASS1_BITS;

    /* Final output stage */
    wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*5] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*1] = (int) (tmp21 + tmp11);
    wsptr[8*4] = (int) (tmp21 - tmp11);
    wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*3] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 6 rows from work array, store into output array.
   * 12-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/24).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    z3 <<= CONST_BITS;

    z4 = (INT32) wsptr[4];
    z4 = MULTIPLY(z4, FIX(1.224744871));             /* c4 */

    tmp10 = z3 + z4;
    tmp11 = z3 - z4;

    z1 = (INT32) wsptr[2];
    z4 = MULTIPLY(z1, FIX(1.366025404));             /* c2 */
    z1 <<= CONST_BITS;
    z2 = (INT32) wsptr[6];
    z2 <<= CONST_BITS;

    tmp12 = z1 - z2;

    tmp21 = z3 + tmp12;
    tmp24 = z3 - tmp12;

    tmp12 = z4 + z2;

    tmp20 = tmp10 + tmp12;
    tmp25 = tmp10 - tmp12;

    tmp12 = z4 - z1 - z2;

    tmp22 = tmp11 + tmp12;
    tmp23 = tmp11 - tmp12;

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z4 = (INT32) wsptr[7];

    tmp11 = MULTIPLY(z2, FIX(1.306562965));          /* c3 */
    tmp14 = MULTIPLY(z2, - FIX_0_541196100);         /* -c9 */

    tmp10 = z1 + z3;
    tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669));  /* c7 */
    tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
    tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
    tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580));   /* -(c7+c11) */
    tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
    tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
    tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
	     MULTIPLY(z4, FIX(1.982889723));          /* c5+c7 */

    z1 -= z4;
    z2 -= z3;
    z3 = MULTIPLY(z1 + z2, FIX_0_541196100);         /* c9 */
    tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865);      /* c3-c9 */
    tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065);      /* c3+c9 */

    /* Final output stage: columns k and 11-k are mirror pairs. */
    outptr[0]  = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[1]  = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[2]  = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[9]  = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[3]  = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[8]  = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[4]  = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[7]  = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[5]  = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];
    outptr[6]  = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
					       CONST_BITS+PASS1_BITS+3)
			     & RANGE_MASK];

    wsptr += 8;			/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 10x5 output block.
 *
 * 5-point IDCT in pass 1 (columns), 10-point in pass 2 (rows).
 *
 * Same fixed-point scheme as the other scaled IDCTs in this file.
 */

GLOBAL(void)
jpeg_idct_10x5 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		JCOEFPTR coef_block,
		JSAMPARRAY output_buf, JDIMENSION output_col)
{
  /* tmp10..tmp14: butterfly terms; tmp20..tmp24: the five row sums;
   * z1..z4: rotator inputs, reused as scratch once consumed.
   */
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*5];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 5-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/10).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp12 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp12 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    tmp12 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp13 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp14 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z1 = MULTIPLY(tmp13 + tmp14, FIX(0.790569415));  /* (c2+c4)/2 */
    z2 = MULTIPLY(tmp13 - tmp14, FIX(0.353553391));  /* (c2-c4)/2 */
    z3 = tmp12 + z2;
    tmp10 = z3 + z1;
    tmp11 = z3 - z1;
    tmp12 -= z2 << 2;		/* DC - 4*z2 gives the center term */

    /* Odd part */

    z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));        /* c3 */
    tmp13 = z1 + MULTIPLY(z2, FIX(0.513743148));     /* c1-c3 */
    tmp14 = z1 - MULTIPLY(z3, FIX(2.176250899));     /* c1+c3 */

    /* Final output stage: 5 workspace rows; row 2 is the symmetric center. */
    wsptr[8*0] = (int) RIGHT_SHIFT(tmp10 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*4] = (int) RIGHT_SHIFT(tmp10 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*1] = (int) RIGHT_SHIFT(tmp11 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*3] = (int) RIGHT_SHIFT(tmp11 - tmp14, CONST_BITS-PASS1_BITS);
    wsptr[8*2] = (int) RIGHT_SHIFT(tmp12, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 5 rows from work array, store into output array.
   * 10-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/20).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    z3 <<= CONST_BITS;

    z4 = (INT32) wsptr[4];
    z1 = MULTIPLY(z4, FIX(1.144122806));             /* c4 */
    z2 = MULTIPLY(z4, FIX(0.437016024));             /* c8 */
    tmp10 = z3 + z1;
    tmp11 = z3 - z2;

    tmp22 = z3 - ((z1 - z2) << 1);                   /* c0 = (c4-c8)*2 */

    z2 = (INT32) wsptr[2];
    z3 = (INT32) wsptr[6];

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));        /* c6 */
    tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148));     /* c2-c6 */
    tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899));     /* c2+c6 */

    tmp20 = tmp10 + tmp12;
    tmp24 = tmp10 - tmp12;
    tmp21 = tmp11 + tmp13;
    tmp23 = tmp11 - tmp13;

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    z3 <<= CONST_BITS;		/* scale up to match the multiplied terms */
    z4 = (INT32) wsptr[7];

    tmp11 = z2 + z4;
    tmp13 = z2 - z4;

    tmp12 = MULTIPLY(tmp13, FIX(0.309016994));       /* (c3-c7)/2 */
    z2 = MULTIPLY(tmp11, FIX(0.951056516));          /* (c3+c7)/2 */

    z4 = z3 + tmp12;

    tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
    tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */

    z2 = MULTIPLY(tmp11, FIX(0.587785252));          /* (c1-c9)/2 */

    z4 = z3 - tmp12 - (tmp13 << (CONST_BITS - 1));

    tmp12 = ((z1 - tmp13) << CONST_BITS) - z3;

    tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
    tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */

    /* Final output stage: columns k and 9-k are mirror pairs. */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];

    wsptr += 8;			/* advance pointer to next row */
  }
}
  3022. /*
  3023. * Perform dequantization and inverse DCT on one block of coefficients,
  3024. * producing a 8x4 output block.
  3025. *
  3026. * 4-point IDCT in pass 1 (columns), 8-point in pass 2 (rows).
  3027. */
  3028. GLOBAL(void)
  3029. jpeg_idct_8x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  3030. JCOEFPTR coef_block,
  3031. JSAMPARRAY output_buf, JDIMENSION output_col)
  3032. {
  3033. INT32 tmp0, tmp1, tmp2, tmp3;
  3034. INT32 tmp10, tmp11, tmp12, tmp13;
  3035. INT32 z1, z2, z3;
  3036. JCOEFPTR inptr;
  3037. ISLOW_MULT_TYPE * quantptr;
  3038. int * wsptr;
  3039. JSAMPROW outptr;
  3040. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  3041. int ctr;
  3042. int workspace[8*4]; /* buffers data between passes */
  3043. SHIFT_TEMPS
  3044. /* Pass 1: process columns from input, store into work array.
  3045. * 4-point IDCT kernel,
  3046. * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT].
  3047. */
  3048. inptr = coef_block;
  3049. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  3050. wsptr = workspace;
  3051. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  3052. /* Even part */
  3053. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  3054. tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  3055. tmp10 = (tmp0 + tmp2) << PASS1_BITS;
  3056. tmp12 = (tmp0 - tmp2) << PASS1_BITS;
  3057. /* Odd part */
  3058. /* Same rotation as in the even part of the 8x8 LL&M IDCT */
  3059. z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  3060. z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  3061. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  3062. /* Add fudge factor here for final descale. */
  3063. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  3064. tmp0 = RIGHT_SHIFT(z1 + MULTIPLY(z2, FIX_0_765366865), /* c2-c6 */
  3065. CONST_BITS-PASS1_BITS);
  3066. tmp2 = RIGHT_SHIFT(z1 - MULTIPLY(z3, FIX_1_847759065), /* c2+c6 */
  3067. CONST_BITS-PASS1_BITS);
  3068. /* Final output stage */
  3069. wsptr[8*0] = (int) (tmp10 + tmp0);
  3070. wsptr[8*3] = (int) (tmp10 - tmp0);
  3071. wsptr[8*1] = (int) (tmp12 + tmp2);
  3072. wsptr[8*2] = (int) (tmp12 - tmp2);
  3073. }
  3074. /* Pass 2: process rows from work array, store into output array.
  3075. * Note that we must descale the results by a factor of 8 == 2**3,
  3076. * and also undo the PASS1_BITS scaling.
  3077. * 8-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
  3078. */
  3079. wsptr = workspace;
  3080. for (ctr = 0; ctr < 4; ctr++) {
  3081. outptr = output_buf[ctr] + output_col;
  3082. /* Even part: reverse the even part of the forward DCT.
  3083. * The rotator is c(-6).
  3084. */
  3085. z2 = (INT32) wsptr[2];
  3086. z3 = (INT32) wsptr[6];
  3087. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  3088. tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
  3089. tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */
  3090. /* Add fudge factor here for final descale. */
  3091. z2 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  3092. z3 = (INT32) wsptr[4];
  3093. tmp0 = (z2 + z3) << CONST_BITS;
  3094. tmp1 = (z2 - z3) << CONST_BITS;
  3095. tmp10 = tmp0 + tmp2;
  3096. tmp13 = tmp0 - tmp2;
  3097. tmp11 = tmp1 + tmp3;
  3098. tmp12 = tmp1 - tmp3;
  3099. /* Odd part per figure 8; the matrix is unitary and hence its
  3100. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  3101. */
  3102. tmp0 = (INT32) wsptr[7];
  3103. tmp1 = (INT32) wsptr[5];
  3104. tmp2 = (INT32) wsptr[3];
  3105. tmp3 = (INT32) wsptr[1];
  3106. z2 = tmp0 + tmp2;
  3107. z3 = tmp1 + tmp3;
  3108. z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* c3 */
  3109. z2 = MULTIPLY(z2, - FIX_1_961570560); /* -c3-c5 */
  3110. z3 = MULTIPLY(z3, - FIX_0_390180644); /* -c3+c5 */
  3111. z2 += z1;
  3112. z3 += z1;
  3113. z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
  3114. tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* -c1+c3+c5-c7 */
  3115. tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* c1+c3-c5-c7 */
  3116. tmp0 += z1 + z2;
  3117. tmp3 += z1 + z3;
  3118. z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
  3119. tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* c1+c3-c5+c7 */
  3120. tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* c1+c3+c5-c7 */
  3121. tmp1 += z1 + z3;
  3122. tmp2 += z1 + z2;
  3123. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  3124. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp3,
  3125. CONST_BITS+PASS1_BITS+3)
  3126. & RANGE_MASK];
  3127. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp3,
  3128. CONST_BITS+PASS1_BITS+3)
  3129. & RANGE_MASK];
  3130. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp2,
  3131. CONST_BITS+PASS1_BITS+3)
  3132. & RANGE_MASK];
  3133. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp2,
  3134. CONST_BITS+PASS1_BITS+3)
  3135. & RANGE_MASK];
  3136. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp1,
  3137. CONST_BITS+PASS1_BITS+3)
  3138. & RANGE_MASK];
  3139. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp1,
  3140. CONST_BITS+PASS1_BITS+3)
  3141. & RANGE_MASK];
  3142. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp0,
  3143. CONST_BITS+PASS1_BITS+3)
  3144. & RANGE_MASK];
  3145. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp0,
  3146. CONST_BITS+PASS1_BITS+3)
  3147. & RANGE_MASK];
  3148. wsptr += DCTSIZE; /* advance pointer to next row */
  3149. }
  3150. }
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 6x3 output block.
 *
 * 3-point IDCT in pass 1 (columns), 6-point in pass 2 (rows).
 *
 * cinfo      - decompressor state; used only to fetch the sample
 *              range-limit table.
 * compptr    - component info; supplies the dequantization table.
 * coef_block - the 8x8 input coefficient block (only the 6x3 low-frequency
 *              corner is read).
 * output_buf/output_col - destination sample rows.
 */

GLOBAL(void)
jpeg_idct_6x3 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[6*3];           /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 3-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/6).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 <<= CONST_BITS;
    /* Add fudge factor here for final descale (rounds the RIGHT_SHIFTs
     * below instead of truncating them).
     */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */
    tmp10 = tmp0 + tmp12;
    tmp2 = tmp0 - tmp12 - tmp12;
    /* Odd part */
    tmp12 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */
    /* Final output stage: descale from CONST_BITS to PASS1_BITS precision. */
    wsptr[6*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[6*2] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[6*1] = (int) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 3 rows from work array, store into output array.
   * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 3; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 <<= CONST_BITS;
    tmp2 = (INT32) wsptr[4];
    tmp10 = MULTIPLY(tmp2, FIX(0.707106781)); /* c4 */
    tmp1 = tmp0 + tmp10;
    tmp11 = tmp0 - tmp10 - tmp10;
    tmp10 = (INT32) wsptr[2];
    tmp0 = MULTIPLY(tmp10, FIX(1.224744871)); /* c2 */
    tmp10 = tmp1 + tmp0;
    tmp12 = tmp1 - tmp0;
    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp0 = tmp1 + ((z1 + z2) << CONST_BITS);
    tmp2 = tmp1 + ((z3 - z2) << CONST_BITS);
    tmp1 = (z1 - z2 - z3) << CONST_BITS;
    /* Final output stage: descale by 2**3 plus the pass-1 scaling,
     * then clamp to sample range via the range-limit table.
     */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += 6;                 /* advance pointer to next row */
  }
}
  3244. /*
  3245. * Perform dequantization and inverse DCT on one block of coefficients,
  3246. * producing a 4x2 output block.
  3247. *
  3248. * 2-point IDCT in pass 1 (columns), 4-point in pass 2 (rows).
  3249. */
  3250. GLOBAL(void)
  3251. jpeg_idct_4x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  3252. JCOEFPTR coef_block,
  3253. JSAMPARRAY output_buf, JDIMENSION output_col)
  3254. {
  3255. INT32 tmp0, tmp2, tmp10, tmp12;
  3256. INT32 z1, z2, z3;
  3257. JCOEFPTR inptr;
  3258. ISLOW_MULT_TYPE * quantptr;
  3259. INT32 * wsptr;
  3260. JSAMPROW outptr;
  3261. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  3262. int ctr;
  3263. INT32 workspace[4*2]; /* buffers data between passes */
  3264. SHIFT_TEMPS
  3265. /* Pass 1: process columns from input, store into work array. */
  3266. inptr = coef_block;
  3267. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  3268. wsptr = workspace;
  3269. for (ctr = 0; ctr < 4; ctr++, inptr++, quantptr++, wsptr++) {
  3270. /* Even part */
  3271. tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  3272. /* Odd part */
  3273. tmp0 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  3274. /* Final output stage */
  3275. wsptr[4*0] = tmp10 + tmp0;
  3276. wsptr[4*1] = tmp10 - tmp0;
  3277. }
  3278. /* Pass 2: process 2 rows from work array, store into output array.
  3279. * 4-point IDCT kernel,
  3280. * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT].
  3281. */
  3282. wsptr = workspace;
  3283. for (ctr = 0; ctr < 2; ctr++) {
  3284. outptr = output_buf[ctr] + output_col;
  3285. /* Even part */
  3286. /* Add fudge factor here for final descale. */
  3287. tmp0 = wsptr[0] + (ONE << 2);
  3288. tmp2 = wsptr[2];
  3289. tmp10 = (tmp0 + tmp2) << CONST_BITS;
  3290. tmp12 = (tmp0 - tmp2) << CONST_BITS;
  3291. /* Odd part */
  3292. /* Same rotation as in the even part of the 8x8 LL&M IDCT */
  3293. z2 = wsptr[1];
  3294. z3 = wsptr[3];
  3295. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  3296. tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
  3297. tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */
  3298. /* Final output stage */
  3299. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  3300. CONST_BITS+3)
  3301. & RANGE_MASK];
  3302. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  3303. CONST_BITS+3)
  3304. & RANGE_MASK];
  3305. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
  3306. CONST_BITS+3)
  3307. & RANGE_MASK];
  3308. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
  3309. CONST_BITS+3)
  3310. & RANGE_MASK];
  3311. wsptr += 4; /* advance pointer to next row */
  3312. }
  3313. }
  3314. /*
  3315. * Perform dequantization and inverse DCT on one block of coefficients,
  3316. * producing a 2x1 output block.
  3317. *
  3318. * 1-point IDCT in pass 1 (columns), 2-point in pass 2 (rows).
  3319. */
  3320. GLOBAL(void)
  3321. jpeg_idct_2x1 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  3322. JCOEFPTR coef_block,
  3323. JSAMPARRAY output_buf, JDIMENSION output_col)
  3324. {
  3325. INT32 tmp0, tmp1;
  3326. ISLOW_MULT_TYPE * quantptr;
  3327. JSAMPROW outptr;
  3328. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  3329. SHIFT_TEMPS
  3330. /* Pass 1: empty. */
  3331. /* Pass 2: process 1 row from input, store into output array. */
  3332. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  3333. outptr = output_buf[0] + output_col;
  3334. /* Even part */
  3335. tmp0 = DEQUANTIZE(coef_block[0], quantptr[0]);
  3336. /* Add fudge factor here for final descale. */
  3337. tmp0 += ONE << 2;
  3338. /* Odd part */
  3339. tmp1 = DEQUANTIZE(coef_block[1], quantptr[1]);
  3340. /* Final output stage */
  3341. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp0 + tmp1, 3) & RANGE_MASK];
  3342. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp0 - tmp1, 3) & RANGE_MASK];
  3343. }
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 8x16 output block.
 *
 * 16-point IDCT in pass 1 (columns), 8-point in pass 2 (rows).
 *
 * cinfo      - decompressor state; used only for the range-limit table.
 * compptr    - component info; supplies the dequantization table.
 * coef_block - the 8x8 input coefficient block.
 * output_buf/output_col - destination: 16 sample rows of 8 samples each.
 */

GLOBAL(void)
jpeg_idct_8x16 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
                JCOEFPTR coef_block,
                JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8*16];          /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 16-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/32).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 <<= CONST_BITS;
    /* Add fudge factor here for final descale (rounds, not truncates). */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
    z1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    tmp1 = MULTIPLY(z1, FIX(1.306562965));      /* c4[16] = c2[8] */
    tmp2 = MULTIPLY(z1, FIX_0_541196100);       /* c12[16] = c6[8] */
    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;
    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    z3 = z1 - z2;
    z4 = MULTIPLY(z3, FIX(0.275899379));        /* c14[16] = c7[8] */
    z3 = MULTIPLY(z3, FIX(1.387039845));        /* c2[16] = c1[8] */
    tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447);  /* (c6+c2)[16] = (c3+c1)[8] */
    tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223);  /* (c6-c14)[16] = (c3-c7)[8] */
    tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
    tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */
    tmp20 = tmp10 + tmp0;
    tmp27 = tmp10 - tmp0;
    tmp21 = tmp12 + tmp1;
    tmp26 = tmp12 - tmp1;
    tmp22 = tmp13 + tmp2;
    tmp25 = tmp13 - tmp2;
    tmp23 = tmp11 + tmp3;
    tmp24 = tmp11 - tmp3;
    /* Odd part.
     * NOTE: z1..z4 and the tmp registers are reused/clobbered as the
     * computation proceeds; the statement order below is significant.
     */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp11 = z1 + z3;
    tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001)); /* c3 */
    tmp2 = MULTIPLY(tmp11, FIX(1.247225013));   /* c5 */
    tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867)); /* c7 */
    tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586)); /* c9 */
    tmp11 = MULTIPLY(tmp11, FIX(0.666655658));  /* c11 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528)); /* c13 */
    tmp0 = tmp1 + tmp2 + tmp3 -
           MULTIPLY(z1, FIX(2.286341144));      /* c7+c5+c3-c1 */
    tmp13 = tmp10 + tmp11 + tmp12 -
            MULTIPLY(z1, FIX(1.835730603));     /* c9+c11+c13-c15 */
    z1 = MULTIPLY(z2 + z3, FIX(0.138617169));   /* c15 */
    tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074)); /* c9+c11-c3-c15 */
    tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048)); /* c5+c7+c15-c3 */
    z1 = MULTIPLY(z3 - z2, FIX(1.407403738));   /* c1 */
    tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */
    tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */
    z2 += z4;
    z1 = MULTIPLY(z2, - FIX(0.666655658));      /* -c11 */
    tmp1 += z1;
    tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962)); /* c3+c11+c15-c7 */
    z2 = MULTIPLY(z2, - FIX(1.247225013));      /* -c5 */
    tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */
    tmp12 += z2;
    z2 = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
    tmp2 += z2;
    tmp3 += z2;
    z2 = MULTIPLY(z4 - z3, FIX(0.410524528));   /* c13 */
    tmp10 += z2;
    tmp11 += z2;
    /* Final output stage: 16 column results, descaled to PASS1_BITS. */
    wsptr[8*0]  = (int) RIGHT_SHIFT(tmp20 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[8*15] = (int) RIGHT_SHIFT(tmp20 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[8*1]  = (int) RIGHT_SHIFT(tmp21 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[8*14] = (int) RIGHT_SHIFT(tmp21 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[8*2]  = (int) RIGHT_SHIFT(tmp22 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[8*13] = (int) RIGHT_SHIFT(tmp22 - tmp2, CONST_BITS-PASS1_BITS);
    wsptr[8*3]  = (int) RIGHT_SHIFT(tmp23 + tmp3, CONST_BITS-PASS1_BITS);
    wsptr[8*12] = (int) RIGHT_SHIFT(tmp23 - tmp3, CONST_BITS-PASS1_BITS);
    wsptr[8*4]  = (int) RIGHT_SHIFT(tmp24 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*11] = (int) RIGHT_SHIFT(tmp24 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[8*5]  = (int) RIGHT_SHIFT(tmp25 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*10] = (int) RIGHT_SHIFT(tmp25 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[8*6]  = (int) RIGHT_SHIFT(tmp26 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*9]  = (int) RIGHT_SHIFT(tmp26 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[8*7]  = (int) RIGHT_SHIFT(tmp27 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[8*8]  = (int) RIGHT_SHIFT(tmp27 - tmp13, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process rows from work array, store into output array.
   * Note that we must descale the results by a factor of 8 == 2**3,
   * and also undo the PASS1_BITS scaling.
   * 8-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 16; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part: reverse the even part of the forward DCT.
     * The rotator is c(-6).
     */
    z2 = (INT32) wsptr[2];
    z3 = (INT32) wsptr[6];
    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);    /* c6 */
    tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865);  /* c2-c6 */
    tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065);  /* c2+c6 */
    /* Add fudge factor here for final descale. */
    z2 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    z3 = (INT32) wsptr[4];
    tmp0 = (z2 + z3) << CONST_BITS;
    tmp1 = (z2 - z3) << CONST_BITS;
    tmp10 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;
    tmp11 = tmp1 + tmp3;
    tmp12 = tmp1 - tmp3;
    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
     */
    tmp0 = (INT32) wsptr[7];
    tmp1 = (INT32) wsptr[5];
    tmp2 = (INT32) wsptr[3];
    tmp3 = (INT32) wsptr[1];
    z2 = tmp0 + tmp2;
    z3 = tmp1 + tmp3;
    z1 = MULTIPLY(z2 + z3, FIX_1_175875602);    /* c3 */
    z2 = MULTIPLY(z2, - FIX_1_961570560);       /* -c3-c5 */
    z3 = MULTIPLY(z3, - FIX_0_390180644);       /* -c3+c5 */
    z2 += z1;
    z3 += z1;
    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
    tmp0 = MULTIPLY(tmp0, FIX_0_298631336);     /* -c1+c3+c5-c7 */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110);     /* c1+c3-c5-c7 */
    tmp0 += z1 + z2;
    tmp3 += z1 + z3;
    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869);     /* c1+c3-c5+c7 */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026);     /* c1+c3+c5-c7 */
    tmp1 += z1 + z3;
    tmp2 += z1 + z2;
    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp3,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp3,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp1,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += DCTSIZE;           /* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 7x14 output block.
 *
 * 14-point IDCT in pass 1 (columns), 7-point in pass 2 (rows).
 *
 * cinfo      - decompressor state; used only for the range-limit table.
 * compptr    - component info; supplies the dequantization table.
 * coef_block - the 8x8 input coefficient block.
 * output_buf/output_col - destination: 14 sample rows of 7 samples each.
 */

GLOBAL(void)
jpeg_idct_7x14 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
                JCOEFPTR coef_block,
                JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[7*14];          /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 14-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/28).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 7; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z1 <<= CONST_BITS;
    /* Add fudge factor here for final descale (rounds, not truncates). */
    z1 += ONE << (CONST_BITS-PASS1_BITS-1);
    z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z2 = MULTIPLY(z4, FIX(1.274162392));        /* c4 */
    z3 = MULTIPLY(z4, FIX(0.314692123));        /* c12 */
    z4 = MULTIPLY(z4, FIX(0.881747734));        /* c8 */
    tmp10 = z1 + z2;
    tmp11 = z1 + z3;
    tmp12 = z1 - z4;
    tmp23 = RIGHT_SHIFT(z1 - ((z2 + z3 - z4) << 1), /* c0 = (c4+c12-c8)*2 */
                        CONST_BITS-PASS1_BITS);
    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    z3 = MULTIPLY(z1 + z2, FIX(1.105676686));   /* c6 */
    tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
    tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
    tmp15 = MULTIPLY(z1, FIX(0.613604268)) -    /* c10 */
            MULTIPLY(z2, FIX(1.378756276));     /* c2 */
    tmp20 = tmp10 + tmp13;
    tmp26 = tmp10 - tmp13;
    tmp21 = tmp11 + tmp14;
    tmp25 = tmp11 - tmp14;
    tmp22 = tmp12 + tmp15;
    tmp24 = tmp12 - tmp15;
    /* Odd part.
     * NOTE: z1..z4 and the tmp registers are reused/clobbered along the
     * way; the statement order below is significant.
     */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp13 = z4 << CONST_BITS;
    tmp14 = z1 + z3;
    tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
    tmp12 = MULTIPLY(tmp14, FIX(1.197448846));  /* c5 */
    tmp10 = tmp11 + tmp12 + tmp13 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
    tmp14 = MULTIPLY(tmp14, FIX(0.752406978));  /* c9 */
    tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
    z1 -= z2;
    tmp15 = MULTIPLY(z1, FIX(0.467085129)) - tmp13; /* c11 */
    tmp16 += tmp15;
    z1 += z4;
    z4 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - tmp13; /* -c13 */
    tmp11 += z4 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
    tmp12 += z4 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
    z4 = MULTIPLY(z3 - z2, FIX(1.405321284));   /* c1 */
    tmp14 += z4 + tmp13 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
    tmp15 += z4 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
    tmp13 = (z1 - z3) << PASS1_BITS;
    /* Final output stage.
     * Row 3's terms (tmp23/tmp13) are already at PASS1_BITS scale,
     * so they are stored without a further shift.
     */
    wsptr[7*0]  = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[7*13] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[7*1]  = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[7*12] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[7*2]  = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[7*11] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[7*3]  = (int) (tmp23 + tmp13);
    wsptr[7*10] = (int) (tmp23 - tmp13);
    wsptr[7*4]  = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[7*9]  = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
    wsptr[7*5]  = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
    wsptr[7*8]  = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
    wsptr[7*6]  = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS);
    wsptr[7*7]  = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 14 rows from work array, store into output array.
   * 7-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/14).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 14; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp23 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp23 <<= CONST_BITS;
    z1 = (INT32) wsptr[2];
    z2 = (INT32) wsptr[4];
    z3 = (INT32) wsptr[6];
    tmp20 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */
    tmp22 = MULTIPLY(z1 - z2, FIX(0.314692123)); /* c6 */
    tmp21 = tmp20 + tmp22 + tmp23 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
    tmp10 = z1 + z3;
    z2 -= tmp10;
    tmp10 = MULTIPLY(tmp10, FIX(1.274162392)) + tmp23; /* c2 */
    tmp20 += tmp10 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */
    tmp22 += tmp10 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */
    tmp23 += MULTIPLY(z2, FIX(1.414213562));    /* c0 */
    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    tmp11 = MULTIPLY(z1 + z2, FIX(0.935414347)); /* (c3+c1-c5)/2 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.170262339)); /* (c3+c5-c1)/2 */
    tmp10 = tmp11 - tmp12;
    tmp11 += tmp12;
    tmp12 = MULTIPLY(z2 + z3, - FIX(1.378756276)); /* -c1 */
    tmp11 += tmp12;
    z2 = MULTIPLY(z1 + z3, FIX(0.613604268));   /* c5 */
    tmp10 += z2;
    tmp12 += z2 + MULTIPLY(z3, FIX(1.870828693)); /* c3+c1-c5 */
    /* Final output stage: descale, clamp, and store 7 samples.
     * The center sample (index 3) has no odd-part contribution.
     */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += 7;                 /* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 6x12 output block.
 *
 * 12-point IDCT in pass 1 (columns), 6-point in pass 2 (rows).
 *
 * cinfo      - decompressor state; used only for the range-limit table.
 * compptr    - component info; supplies the dequantization table.
 * coef_block - the 8x8 input coefficient block.
 * output_buf/output_col - destination: 12 sample rows of 6 samples each.
 */

GLOBAL(void)
jpeg_idct_6x12 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
                JCOEFPTR coef_block,
                JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[6*12];          /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 12-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/24).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 <<= CONST_BITS;
    /* Add fudge factor here for final descale (rounds, not truncates). */
    z3 += ONE << (CONST_BITS-PASS1_BITS-1);
    z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z4 = MULTIPLY(z4, FIX(1.224744871));        /* c4 */
    tmp10 = z3 + z4;
    tmp11 = z3 - z4;
    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z4 = MULTIPLY(z1, FIX(1.366025404));        /* c2 */
    z1 <<= CONST_BITS;
    z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    z2 <<= CONST_BITS;
    tmp12 = z1 - z2;
    tmp21 = z3 + tmp12;
    tmp24 = z3 - tmp12;
    tmp12 = z4 + z2;
    tmp20 = tmp10 + tmp12;
    tmp25 = tmp10 - tmp12;
    tmp12 = z4 - z1 - z2;
    tmp22 = tmp11 + tmp12;
    tmp23 = tmp11 - tmp12;
    /* Odd part.
     * NOTE: z1..z4 and the tmp registers are reused/clobbered along the
     * way; the statement order below is significant.
     */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp11 = MULTIPLY(z2, FIX(1.306562965));     /* c3 */
    tmp14 = MULTIPLY(z2, - FIX_0_541196100);    /* -c9 */
    tmp10 = z1 + z3;
    tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
    tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
    tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
    tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */
    tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
    tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
    tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
             MULTIPLY(z4, FIX(1.982889723));    /* c5+c7 */
    z1 -= z4;
    z2 -= z3;
    z3 = MULTIPLY(z1 + z2, FIX_0_541196100);    /* c9 */
    tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */
    tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */
    /* Final output stage: 12 column results, descaled to PASS1_BITS. */
    wsptr[6*0]  = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[6*11] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[6*1]  = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[6*10] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[6*2]  = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[6*9]  = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[6*3]  = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[6*8]  = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[6*4]  = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[6*7]  = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
    wsptr[6*5]  = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
    wsptr[6*6]  = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 12 rows from work array, store into output array.
   * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 12; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp10 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp10 <<= CONST_BITS;
    tmp12 = (INT32) wsptr[4];
    tmp20 = MULTIPLY(tmp12, FIX(0.707106781));  /* c4 */
    tmp11 = tmp10 + tmp20;
    tmp21 = tmp10 - tmp20 - tmp20;
    tmp20 = (INT32) wsptr[2];
    tmp10 = MULTIPLY(tmp20, FIX(1.224744871));  /* c2 */
    tmp20 = tmp11 + tmp10;
    tmp22 = tmp11 - tmp10;
    /* Odd part */
    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    tmp11 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp10 = tmp11 + ((z1 + z2) << CONST_BITS);
    tmp12 = tmp11 + ((z3 - z2) << CONST_BITS);
    tmp11 = (z1 - z2 - z3) << CONST_BITS;
    /* Final output stage: descale, clamp, and store 6 samples. */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    wsptr += 6;                 /* advance pointer to next row */
  }
}
  3822. /*
  3823. * Perform dequantization and inverse DCT on one block of coefficients,
  3824. * producing a 5x10 output block.
  3825. *
  3826. * 10-point IDCT in pass 1 (columns), 5-point in pass 2 (rows).
  3827. */
GLOBAL(void)
jpeg_idct_5x10 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
		JCOEFPTR coef_block,
		JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24;
  INT32 z1, z2, z3, z4, z5;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[5*10];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 10-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/20).
   * Workspace values leave this pass scaled up by 2**PASS1_BITS.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    z3 += ONE << (CONST_BITS-PASS1_BITS-1);
    z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z1 = MULTIPLY(z4, FIX(1.144122806));	/* c4 */
    z2 = MULTIPLY(z4, FIX(0.437016024));	/* c8 */
    tmp10 = z3 + z1;
    tmp11 = z3 - z2;
    /* tmp22 is descaled here already: its output row pairs with tmp12,
     * which the odd part produces pre-scaled by PASS1_BITS (see below),
     * so the 5*2 / 5*7 stores need no further shift. */
    tmp22 = RIGHT_SHIFT(z3 - ((z1 - z2) << 1),	/* c0 = (c4-c8)*2 */
			CONST_BITS-PASS1_BITS);
    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    /* Shared-term rotation: c2 = c6 + (c2-c6), -c6 = c6 - (c2+c6) */
    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));	/* c6 */
    tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148));	/* c2-c6 */
    tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899));	/* c2+c6 */
    tmp20 = tmp10 + tmp12;
    tmp24 = tmp10 - tmp12;
    tmp21 = tmp11 + tmp13;
    tmp23 = tmp11 - tmp13;
    /* Odd part */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp11 = z2 + z4;
    tmp13 = z2 - z4;
    tmp12 = MULTIPLY(tmp13, FIX(0.309016994));	/* (c3-c7)/2 */
    z5 = z3 << CONST_BITS;
    z2 = MULTIPLY(tmp11, FIX(0.951056516));	/* (c3+c7)/2 */
    z4 = z5 + tmp12;
    tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4;	/* c1 */
    tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4;	/* c9 */
    z2 = MULTIPLY(tmp11, FIX(0.587785252));	/* (c1-c9)/2 */
    z4 = z5 - tmp12 - (tmp13 << (CONST_BITS - 1));
    /* tmp12 is produced at PASS1_BITS scale so its row needs no descale */
    tmp12 = (z1 - tmp13 - z3) << PASS1_BITS;
    tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4;	/* c3 */
    tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4;	/* c7 */
    /* Final output stage */
    wsptr[5*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[5*9] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[5*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[5*8] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[5*2] = (int) (tmp22 + tmp12);	/* both terms pre-descaled above */
    wsptr[5*7] = (int) (tmp22 - tmp12);
    wsptr[5*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[5*6] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[5*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[5*5] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 10 rows from work array, store into output array.
   * 5-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/10).
   * Final descale removes CONST_BITS+PASS1_BITS plus the 3 bits of
   * overall DCT scaling; results are clamped via range_limit[].
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 10; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp12 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp12 <<= CONST_BITS;
    tmp13 = (INT32) wsptr[2];
    tmp14 = (INT32) wsptr[4];
    z1 = MULTIPLY(tmp13 + tmp14, FIX(0.790569415));	/* (c2+c4)/2 */
    z2 = MULTIPLY(tmp13 - tmp14, FIX(0.353553391));	/* (c2-c4)/2 */
    z3 = tmp12 + z2;
    tmp10 = z3 + z1;
    tmp11 = z3 - z1;
    tmp12 -= z2 << 2;
    /* Odd part */
    z2 = (INT32) wsptr[1];
    z3 = (INT32) wsptr[3];
    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));	/* c3 */
    tmp13 = z1 + MULTIPLY(z2, FIX(0.513743148));	/* c1-c3 */
    tmp14 = z1 - MULTIPLY(z3, FIX(2.176250899));	/* c1+c3 */
    /* Final output stage */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp13,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp13,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp14,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp14,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    wsptr += 5;		/* advance pointer to next row */
  }
}
  3945. /*
  3946. * Perform dequantization and inverse DCT on one block of coefficients,
  3947. * producing a 4x8 output block.
  3948. *
  3949. * 8-point IDCT in pass 1 (columns), 4-point in pass 2 (rows).
  3950. */
GLOBAL(void)
jpeg_idct_4x8 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
	       JCOEFPTR coef_block,
	       JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3;
  INT32 tmp10, tmp11, tmp12, tmp13;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[4*8];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Note results are scaled up by sqrt(8) compared to a true IDCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 8-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 4; ctr > 0; ctr--) {
    /* Due to quantization, we will usually find that many of the input
     * coefficients are zero, especially the AC terms.  We can exploit this
     * by short-circuiting the IDCT calculation for any column in which all
     * the AC terms are zero.  In that case each output is equal to the
     * DC coefficient (with scale factor as needed).
     * With typical images and quantization tables, half or more of the
     * column DCT calculations can be simplified this way.
     */
    if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
	inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 &&
	inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 &&
	inptr[DCTSIZE*7] == 0) {
      /* AC terms all zero */
      int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS;

      wsptr[4*0] = dcval;
      wsptr[4*1] = dcval;
      wsptr[4*2] = dcval;
      wsptr[4*3] = dcval;
      wsptr[4*4] = dcval;
      wsptr[4*5] = dcval;
      wsptr[4*6] = dcval;
      wsptr[4*7] = dcval;

      inptr++;			/* advance pointers to next column */
      quantptr++;
      wsptr++;
      continue;
    }

    /* Even part: reverse the even part of the forward DCT.
     * The rotator is c(-6).
     */
    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);	/* c6 */
    tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865);	/* c2-c6 */
    tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065);	/* c2+c6 */

    z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z2 <<= CONST_BITS;
    z3 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    z2 += ONE << (CONST_BITS-PASS1_BITS-1);

    tmp0 = z2 + z3;
    tmp1 = z2 - z3;

    tmp10 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;
    tmp11 = tmp1 + tmp3;
    tmp12 = tmp1 - tmp3;

    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);

    /* Common subexpressions shared by the four odd outputs */
    z2 = tmp0 + tmp2;
    z3 = tmp1 + tmp3;

    z1 = MULTIPLY(z2 + z3, FIX_1_175875602);	/* c3 */
    z2 = MULTIPLY(z2, - FIX_1_961570560);	/* -c3-c5 */
    z3 = MULTIPLY(z3, - FIX_0_390180644);	/* -c3+c5 */
    z2 += z1;
    z3 += z1;

    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223);	/* -c3+c7 */
    tmp0 = MULTIPLY(tmp0, FIX_0_298631336);	/* -c1+c3+c5-c7 */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110);	/* c1+c3-c5-c7 */
    tmp0 += z1 + z2;
    tmp3 += z1 + z3;

    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447);	/* -c1-c3 */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869);	/* c1+c3-c5+c7 */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026);	/* c1+c3+c5-c7 */
    tmp1 += z1 + z3;
    tmp2 += z1 + z2;

    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
    wsptr[4*0] = (int) RIGHT_SHIFT(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
    wsptr[4*7] = (int) RIGHT_SHIFT(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
    wsptr[4*1] = (int) RIGHT_SHIFT(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[4*6] = (int) RIGHT_SHIFT(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
    wsptr[4*2] = (int) RIGHT_SHIFT(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[4*5] = (int) RIGHT_SHIFT(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[4*3] = (int) RIGHT_SHIFT(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[4*4] = (int) RIGHT_SHIFT(tmp13 - tmp0, CONST_BITS-PASS1_BITS);

    inptr++;			/* advance pointers to next column */
    quantptr++;
    wsptr++;
  }

  /* Pass 2: process 8 rows from work array, store into output array.
   * 4-point IDCT kernel,
   * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT].
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp2 = (INT32) wsptr[2];

    tmp10 = (tmp0 + tmp2) << CONST_BITS;
    tmp12 = (tmp0 - tmp2) << CONST_BITS;

    /* Odd part */
    /* Same rotation as in the even part of the 8x8 LL&M IDCT */
    z2 = (INT32) wsptr[1];
    z3 = (INT32) wsptr[3];

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);	/* c6 */
    tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865);	/* c2-c6 */
    tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065);	/* c2+c6 */

    /* Final output stage */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];

    wsptr += 4;		/* advance pointer to next row */
  }
}
  4096. /*
  4097. * Perform dequantization and inverse DCT on one block of coefficients,
  4098. * producing a reduced-size 3x6 output block.
  4099. *
  4100. * 6-point IDCT in pass 1 (columns), 3-point in pass 2 (rows).
  4101. */
GLOBAL(void)
jpeg_idct_3x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
	       JCOEFPTR coef_block,
	       JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[3*6];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
   * Workspace values leave this pass scaled up by 2**PASS1_BITS.
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 3; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    tmp10 = MULTIPLY(tmp2, FIX(0.707106781));	/* c4 */
    tmp1 = tmp0 + tmp10;
    /* tmp11 is descaled here already: it pairs with tmp1 from the odd part,
     * which is produced pre-scaled by PASS1_BITS, so the 3*1 / 3*4 stores
     * need no further shift. */
    tmp11 = RIGHT_SHIFT(tmp0 - tmp10 - tmp10, CONST_BITS-PASS1_BITS);
    tmp10 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp0 = MULTIPLY(tmp10, FIX(1.224744871));	/* c2 */
    tmp10 = tmp1 + tmp0;
    tmp12 = tmp1 - tmp0;
    /* Odd part */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404));	/* c5 */
    tmp0 = tmp1 + ((z1 + z2) << CONST_BITS);
    tmp2 = tmp1 + ((z3 - z2) << CONST_BITS);
    tmp1 = (z1 - z2 - z3) << PASS1_BITS;	/* already at pass-1 scale */
    /* Final output stage */
    wsptr[3*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[3*5] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[3*1] = (int) (tmp11 + tmp1);	/* both terms pre-descaled above */
    wsptr[3*4] = (int) (tmp11 - tmp1);
    wsptr[3*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[3*3] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 6 rows from work array, store into output array.
   * 3-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/6).
   * Final descale removes CONST_BITS+PASS1_BITS plus the 3 bits of
   * overall DCT scaling; results are clamped via range_limit[].
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 <<= CONST_BITS;
    tmp2 = (INT32) wsptr[2];
    tmp12 = MULTIPLY(tmp2, FIX(0.707106781));	/* c2 */
    tmp10 = tmp0 + tmp12;
    tmp2 = tmp0 - tmp12 - tmp12;
    /* Odd part */
    tmp12 = (INT32) wsptr[1];
    tmp0 = MULTIPLY(tmp12, FIX(1.224744871));	/* c1 */
    /* Final output stage */
    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2,
					      CONST_BITS+PASS1_BITS+3)
			    & RANGE_MASK];
    wsptr += 3;		/* advance pointer to next row */
  }
}
  4183. /*
  4184. * Perform dequantization and inverse DCT on one block of coefficients,
  4185. * producing a 2x4 output block.
  4186. *
  4187. * 4-point IDCT in pass 1 (columns), 2-point in pass 2 (rows).
  4188. */
  4189. GLOBAL(void)
  4190. jpeg_idct_2x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  4191. JCOEFPTR coef_block,
  4192. JSAMPARRAY output_buf, JDIMENSION output_col)
  4193. {
  4194. INT32 tmp0, tmp2, tmp10, tmp12;
  4195. INT32 z1, z2, z3;
  4196. JCOEFPTR inptr;
  4197. ISLOW_MULT_TYPE * quantptr;
  4198. INT32 * wsptr;
  4199. JSAMPROW outptr;
  4200. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  4201. int ctr;
  4202. INT32 workspace[2*4]; /* buffers data between passes */
  4203. SHIFT_TEMPS
  4204. /* Pass 1: process columns from input, store into work array.
  4205. * 4-point IDCT kernel,
  4206. * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT].
  4207. */
  4208. inptr = coef_block;
  4209. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  4210. wsptr = workspace;
  4211. for (ctr = 0; ctr < 2; ctr++, inptr++, quantptr++, wsptr++) {
  4212. /* Even part */
  4213. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  4214. tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  4215. tmp10 = (tmp0 + tmp2) << CONST_BITS;
  4216. tmp12 = (tmp0 - tmp2) << CONST_BITS;
  4217. /* Odd part */
  4218. /* Same rotation as in the even part of the 8x8 LL&M IDCT */
  4219. z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  4220. z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  4221. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  4222. tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
  4223. tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */
  4224. /* Final output stage */
  4225. wsptr[2*0] = tmp10 + tmp0;
  4226. wsptr[2*3] = tmp10 - tmp0;
  4227. wsptr[2*1] = tmp12 + tmp2;
  4228. wsptr[2*2] = tmp12 - tmp2;
  4229. }
  4230. /* Pass 2: process 4 rows from work array, store into output array. */
  4231. wsptr = workspace;
  4232. for (ctr = 0; ctr < 4; ctr++) {
  4233. outptr = output_buf[ctr] + output_col;
  4234. /* Even part */
  4235. /* Add fudge factor here for final descale. */
  4236. tmp10 = wsptr[0] + (ONE << (CONST_BITS+2));
  4237. /* Odd part */
  4238. tmp0 = wsptr[1];
  4239. /* Final output stage */
  4240. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS+3)
  4241. & RANGE_MASK];
  4242. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS+3)
  4243. & RANGE_MASK];
  4244. wsptr += 2; /* advance pointer to next row */
  4245. }
  4246. }
  4247. /*
  4248. * Perform dequantization and inverse DCT on one block of coefficients,
  4249. * producing a 1x2 output block.
  4250. *
  4251. * 2-point IDCT in pass 1 (columns), 1-point in pass 2 (rows).
  4252. */
  4253. GLOBAL(void)
  4254. jpeg_idct_1x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  4255. JCOEFPTR coef_block,
  4256. JSAMPARRAY output_buf, JDIMENSION output_col)
  4257. {
  4258. INT32 tmp0, tmp1;
  4259. ISLOW_MULT_TYPE * quantptr;
  4260. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  4261. SHIFT_TEMPS
  4262. /* Process 1 column from input, store into output array. */
  4263. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  4264. /* Even part */
  4265. tmp0 = DEQUANTIZE(coef_block[DCTSIZE*0], quantptr[DCTSIZE*0]);
  4266. /* Add fudge factor here for final descale. */
  4267. tmp0 += ONE << 2;
  4268. /* Odd part */
  4269. tmp1 = DEQUANTIZE(coef_block[DCTSIZE*1], quantptr[DCTSIZE*1]);
  4270. /* Final output stage */
  4271. output_buf[0][output_col] = range_limit[(int) RIGHT_SHIFT(tmp0 + tmp1, 3)
  4272. & RANGE_MASK];
  4273. output_buf[1][output_col] = range_limit[(int) RIGHT_SHIFT(tmp0 - tmp1, 3)
  4274. & RANGE_MASK];
  4275. }
  4276. #endif /* IDCT_SCALING_SUPPORTED */
  4277. #endif /* DCT_ISLOW_SUPPORTED */