zstd_compress.c 218 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208
  1. /*
  2. * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
  3. * All rights reserved.
  4. *
  5. * This source code is licensed under both the BSD-style license (found in the
  6. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  7. * in the COPYING file in the root directory of this source tree).
  8. * You may select, at your option, one of the above-listed licenses.
  9. */
  10. /*-*************************************
  11. * Dependencies
  12. ***************************************/
  13. #include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
  14. #include "../common/cpu.h"
  15. #include "../common/mem.h"
  16. #include "hist.h" /* HIST_countFast_wksp */
  17. #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
  18. #include "../common/fse.h"
  19. #define HUF_STATIC_LINKING_ONLY
  20. #include "../common/huf.h"
  21. #include "zstd_compress_internal.h"
  22. #include "zstd_compress_sequences.h"
  23. #include "zstd_compress_literals.h"
  24. #include "zstd_fast.h"
  25. #include "zstd_double_fast.h"
  26. #include "zstd_lazy.h"
  27. #include "zstd_opt.h"
  28. #include "zstd_ldm.h"
  29. #include "zstd_compress_superblock.h"
  30. /* ***************************************************************
  31. * Tuning parameters
  32. *****************************************************************/
  33. /*!
  34. * COMPRESS_HEAPMODE :
  35. * Select how default compression function ZSTD_compress() allocates its context,
  36. * on stack (0, default), or into heap (1).
  37. * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
  38. */
  39. #ifndef ZSTD_COMPRESS_HEAPMODE
  40. # define ZSTD_COMPRESS_HEAPMODE 0
  41. #endif
  42. /*-*************************************
  43. * Helper functions
  44. ***************************************/
  45. /* ZSTD_compressBound()
  46. * Note that the result from this function is only compatible with the "normal"
  47. * full-block strategy.
  48. * When there are a lot of small blocks due to frequent flush in streaming mode
  49. * the overhead of headers can make the compressed data to be larger than the
  50. * return value of ZSTD_compressBound().
  51. */
  52. size_t ZSTD_compressBound(size_t srcSize) {
  53. return ZSTD_COMPRESSBOUND(srcSize);
  54. }
  55. /*-*************************************
  56. * Context memory management
  57. ***************************************/
struct ZSTD_CDict_s {
    const void* dictContent;                 /* dictionary content bytes */
    size_t dictContentSize;                  /* size of dictContent, in bytes */
    ZSTD_dictContentType_e dictContentType;  /* The dictContentType the CDict was created with */
    U32* entropyWorkspace;                   /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
    ZSTD_cwksp workspace;                    /* workspace the CDict's internal buffers are carved from */
    ZSTD_matchState_t matchState;            /* match-finder state — presumably pre-loaded with the dictionary; confirm at load site */
    ZSTD_compressedBlockState_t cBlockState; /* block state (entropy tables) associated with this CDict */
    ZSTD_customMem customMem;                /* allocator used for this CDict's memory */
    U32 dictID;                              /* dictionary ID (0 when none) — NOTE(review): value source is the dict header; verify at creation site */
    int compressionLevel;                    /* 0 indicates that advanced API was used to select CDict params */
};  /* typedef'd to ZSTD_CDict within "zstd.h" */
  70. ZSTD_CCtx* ZSTD_createCCtx(void)
  71. {
  72. return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
  73. }
  74. static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
  75. {
  76. assert(cctx != NULL);
  77. ZSTD_memset(cctx, 0, sizeof(*cctx));
  78. cctx->customMem = memManager;
  79. cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
  80. { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
  81. assert(!ZSTD_isError(err));
  82. (void)err;
  83. }
  84. }
  85. ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
  86. {
  87. ZSTD_STATIC_ASSERT(zcss_init==0);
  88. ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
  89. if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
  90. { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
  91. if (!cctx) return NULL;
  92. ZSTD_initCCtx(cctx, customMem);
  93. return cctx;
  94. }
  95. }
/* ZSTD_initStaticCCtx() :
 * Builds a CCtx entirely inside a caller-provided memory area; nothing is
 * heap-allocated. Returns NULL if the area is too small or misaligned.
 * The CCtx struct itself is the first object reserved from the workspace. */
ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
{
    ZSTD_cwksp ws;
    ZSTD_CCtx* cctx;
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
    if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
    if (cctx == NULL) return NULL;
    ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
    ZSTD_cwksp_move(&cctx->workspace, &ws);
    /* Non-zero staticSize marks this CCtx as static: ZSTD_freeCCtx() rejects it. */
    cctx->staticSize = workspaceSize;
    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
    if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    return cctx;
}
/**
 * Clears and frees all of the dictionaries in the CCtx.
 * Frees the locally-owned dictionary buffer and CDict first, then zeroes
 * the tracking structs so no dangling references remain.
 */
static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
{
    ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
    ZSTD_freeCDict(cctx->localDict.cdict);
    ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
    cctx->cdict = NULL;  /* the external cdict reference is not owned — just dropped */
}
  127. static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
  128. {
  129. size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
  130. size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
  131. return bufferSize + cdictSize;
  132. }
/* ZSTD_freeCCtxContent() :
 * Releases everything the CCtx owns (dictionaries, multithreading context,
 * workspace) without freeing the CCtx struct itself.
 * Must not be called on a static CCtx (enforced by the assert). */
static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
    assert(cctx != NULL);
    assert(cctx->staticSize == 0);
    ZSTD_clearAllDicts(cctx);
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
#endif
    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
}
  143. size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
  144. {
  145. if (cctx==NULL) return 0; /* support free on NULL */
  146. RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
  147. "not compatible with static CCtx");
  148. {
  149. int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
  150. ZSTD_freeCCtxContent(cctx);
  151. if (!cctxInWorkspace) {
  152. ZSTD_customFree(cctx, cctx->customMem);
  153. }
  154. }
  155. return 0;
  156. }
  157. static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
  158. {
  159. #ifdef ZSTD_MULTITHREAD
  160. return ZSTDMT_sizeof_CCtx(cctx->mtctx);
  161. #else
  162. (void)cctx;
  163. return 0;
  164. #endif
  165. }
  166. size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
  167. {
  168. if (cctx==NULL) return 0; /* support sizeof on NULL */
  169. /* cctx may be in the workspace */
  170. return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
  171. + ZSTD_cwksp_sizeof(&cctx->workspace)
  172. + ZSTD_sizeof_localDict(cctx->localDict)
  173. + ZSTD_sizeof_mtctx(cctx);
  174. }
  175. size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
  176. {
  177. return ZSTD_sizeof_CCtx(zcs); /* same object */
  178. }
  179. /* private API call, for dictBuilder only */
  180. const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
  181. /* Returns 1 if compression parameters are such that we should
  182. * enable long distance matching (wlog >= 27, strategy >= btopt).
  183. * Returns 0 otherwise.
  184. */
  185. static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
  186. return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
  187. }
/* ZSTD_makeCCtxParamsFromCParams() :
 * Expands bare compression parameters into a full ZSTD_CCtx_params,
 * turning on long-distance matching when the cParams call for it.
 * cParams are presumed valid (checked by the trailing assert). */
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
        ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params cctxParams;
    /* should not matter, as all cParams are presumed properly defined */
    ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
    cctxParams.cParams = cParams;
    if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
        DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
        cctxParams.ldmParams.enableLdm = 1;
        /* LDM is enabled by default for optimal parser and window size >= 128MB */
        ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
        assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
        assert(cctxParams.ldmParams.hashRateLog < 32);
    }
    assert(!ZSTD_checkCParams(cParams));
    return cctxParams;
}
  206. static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
  207. ZSTD_customMem customMem)
  208. {
  209. ZSTD_CCtx_params* params;
  210. if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
  211. params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
  212. sizeof(ZSTD_CCtx_params), customMem);
  213. if (!params) { return NULL; }
  214. ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
  215. params->customMem = customMem;
  216. return params;
  217. }
  218. ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
  219. {
  220. return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
  221. }
  222. size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
  223. {
  224. if (params == NULL) { return 0; }
  225. ZSTD_customFree(params, params->customMem);
  226. return 0;
  227. }
  228. size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
  229. {
  230. return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
  231. }
  232. size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
  233. RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
  234. ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
  235. cctxParams->compressionLevel = compressionLevel;
  236. cctxParams->fParams.contentSizeFlag = 1;
  237. return 0;
  238. }
  239. size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
  240. {
  241. RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
  242. FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
  243. ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
  244. assert(!ZSTD_checkCParams(params.cParams));
  245. cctxParams->cParams = params.cParams;
  246. cctxParams->fParams = params.fParams;
  247. cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
  248. return 0;
  249. }
  250. /* ZSTD_assignParamsToCCtxParams() :
  251. * params is presumed valid at this stage */
  252. static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
  253. const ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
  254. {
  255. ZSTD_CCtx_params ret = *cctxParams;
  256. assert(!ZSTD_checkCParams(params->cParams));
  257. ret.cParams = params->cParams;
  258. ret.fParams = params->fParams;
  259. ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
  260. return ret;
  261. }
  262. ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
  263. {
  264. ZSTD_bounds bounds = { 0, 0, 0 };
  265. switch(param)
  266. {
  267. case ZSTD_c_compressionLevel:
  268. bounds.lowerBound = ZSTD_minCLevel();
  269. bounds.upperBound = ZSTD_maxCLevel();
  270. return bounds;
  271. case ZSTD_c_windowLog:
  272. bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
  273. bounds.upperBound = ZSTD_WINDOWLOG_MAX;
  274. return bounds;
  275. case ZSTD_c_hashLog:
  276. bounds.lowerBound = ZSTD_HASHLOG_MIN;
  277. bounds.upperBound = ZSTD_HASHLOG_MAX;
  278. return bounds;
  279. case ZSTD_c_chainLog:
  280. bounds.lowerBound = ZSTD_CHAINLOG_MIN;
  281. bounds.upperBound = ZSTD_CHAINLOG_MAX;
  282. return bounds;
  283. case ZSTD_c_searchLog:
  284. bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
  285. bounds.upperBound = ZSTD_SEARCHLOG_MAX;
  286. return bounds;
  287. case ZSTD_c_minMatch:
  288. bounds.lowerBound = ZSTD_MINMATCH_MIN;
  289. bounds.upperBound = ZSTD_MINMATCH_MAX;
  290. return bounds;
  291. case ZSTD_c_targetLength:
  292. bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
  293. bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
  294. return bounds;
  295. case ZSTD_c_strategy:
  296. bounds.lowerBound = ZSTD_STRATEGY_MIN;
  297. bounds.upperBound = ZSTD_STRATEGY_MAX;
  298. return bounds;
  299. case ZSTD_c_contentSizeFlag:
  300. bounds.lowerBound = 0;
  301. bounds.upperBound = 1;
  302. return bounds;
  303. case ZSTD_c_checksumFlag:
  304. bounds.lowerBound = 0;
  305. bounds.upperBound = 1;
  306. return bounds;
  307. case ZSTD_c_dictIDFlag:
  308. bounds.lowerBound = 0;
  309. bounds.upperBound = 1;
  310. return bounds;
  311. case ZSTD_c_nbWorkers:
  312. bounds.lowerBound = 0;
  313. #ifdef ZSTD_MULTITHREAD
  314. bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
  315. #else
  316. bounds.upperBound = 0;
  317. #endif
  318. return bounds;
  319. case ZSTD_c_jobSize:
  320. bounds.lowerBound = 0;
  321. #ifdef ZSTD_MULTITHREAD
  322. bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
  323. #else
  324. bounds.upperBound = 0;
  325. #endif
  326. return bounds;
  327. case ZSTD_c_overlapLog:
  328. #ifdef ZSTD_MULTITHREAD
  329. bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
  330. bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
  331. #else
  332. bounds.lowerBound = 0;
  333. bounds.upperBound = 0;
  334. #endif
  335. return bounds;
  336. case ZSTD_c_enableDedicatedDictSearch:
  337. bounds.lowerBound = 0;
  338. bounds.upperBound = 1;
  339. return bounds;
  340. case ZSTD_c_enableLongDistanceMatching:
  341. bounds.lowerBound = 0;
  342. bounds.upperBound = 1;
  343. return bounds;
  344. case ZSTD_c_ldmHashLog:
  345. bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
  346. bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
  347. return bounds;
  348. case ZSTD_c_ldmMinMatch:
  349. bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
  350. bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
  351. return bounds;
  352. case ZSTD_c_ldmBucketSizeLog:
  353. bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
  354. bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
  355. return bounds;
  356. case ZSTD_c_ldmHashRateLog:
  357. bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
  358. bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
  359. return bounds;
  360. /* experimental parameters */
  361. case ZSTD_c_rsyncable:
  362. bounds.lowerBound = 0;
  363. bounds.upperBound = 1;
  364. return bounds;
  365. case ZSTD_c_forceMaxWindow :
  366. bounds.lowerBound = 0;
  367. bounds.upperBound = 1;
  368. return bounds;
  369. case ZSTD_c_format:
  370. ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
  371. bounds.lowerBound = ZSTD_f_zstd1;
  372. bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */
  373. return bounds;
  374. case ZSTD_c_forceAttachDict:
  375. ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
  376. bounds.lowerBound = ZSTD_dictDefaultAttach;
  377. bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
  378. return bounds;
  379. case ZSTD_c_literalCompressionMode:
  380. ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
  381. bounds.lowerBound = ZSTD_lcm_auto;
  382. bounds.upperBound = ZSTD_lcm_uncompressed;
  383. return bounds;
  384. case ZSTD_c_targetCBlockSize:
  385. bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
  386. bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
  387. return bounds;
  388. case ZSTD_c_srcSizeHint:
  389. bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
  390. bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
  391. return bounds;
  392. case ZSTD_c_stableInBuffer:
  393. case ZSTD_c_stableOutBuffer:
  394. bounds.lowerBound = (int)ZSTD_bm_buffered;
  395. bounds.upperBound = (int)ZSTD_bm_stable;
  396. return bounds;
  397. case ZSTD_c_blockDelimiters:
  398. bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
  399. bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
  400. return bounds;
  401. case ZSTD_c_validateSequences:
  402. bounds.lowerBound = 0;
  403. bounds.upperBound = 1;
  404. return bounds;
  405. default:
  406. bounds.error = ERROR(parameter_unsupported);
  407. return bounds;
  408. }
  409. }
  410. /* ZSTD_cParam_clampBounds:
  411. * Clamps the value into the bounded range.
  412. */
  413. static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
  414. {
  415. ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
  416. if (ZSTD_isError(bounds.error)) return bounds.error;
  417. if (*value < bounds.lowerBound) *value = bounds.lowerBound;
  418. if (*value > bounds.upperBound) *value = bounds.upperBound;
  419. return 0;
  420. }
/* BOUNDCHECK :
 * checks that `val` lies within the valid range for `cParam`;
 * on failure, makes the enclosing function return a parameter_outOfBound error. */
#define BOUNDCHECK(cParam, val) { \
    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
                    parameter_outOfBound, "Param out of bounds"); \
}
  425. static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
  426. {
  427. switch(param)
  428. {
  429. case ZSTD_c_compressionLevel:
  430. case ZSTD_c_hashLog:
  431. case ZSTD_c_chainLog:
  432. case ZSTD_c_searchLog:
  433. case ZSTD_c_minMatch:
  434. case ZSTD_c_targetLength:
  435. case ZSTD_c_strategy:
  436. return 1;
  437. case ZSTD_c_format:
  438. case ZSTD_c_windowLog:
  439. case ZSTD_c_contentSizeFlag:
  440. case ZSTD_c_checksumFlag:
  441. case ZSTD_c_dictIDFlag:
  442. case ZSTD_c_forceMaxWindow :
  443. case ZSTD_c_nbWorkers:
  444. case ZSTD_c_jobSize:
  445. case ZSTD_c_overlapLog:
  446. case ZSTD_c_rsyncable:
  447. case ZSTD_c_enableDedicatedDictSearch:
  448. case ZSTD_c_enableLongDistanceMatching:
  449. case ZSTD_c_ldmHashLog:
  450. case ZSTD_c_ldmMinMatch:
  451. case ZSTD_c_ldmBucketSizeLog:
  452. case ZSTD_c_ldmHashRateLog:
  453. case ZSTD_c_forceAttachDict:
  454. case ZSTD_c_literalCompressionMode:
  455. case ZSTD_c_targetCBlockSize:
  456. case ZSTD_c_srcSizeHint:
  457. case ZSTD_c_stableInBuffer:
  458. case ZSTD_c_stableOutBuffer:
  459. case ZSTD_c_blockDelimiters:
  460. case ZSTD_c_validateSequences:
  461. default:
  462. return 0;
  463. }
  464. }
  465. size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
  466. {
  467. DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
  468. if (cctx->streamStage != zcss_init) {
  469. if (ZSTD_isUpdateAuthorized(param)) {
  470. cctx->cParamsChanged = 1;
  471. } else {
  472. RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
  473. } }
  474. switch(param)
  475. {
  476. case ZSTD_c_nbWorkers:
  477. RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
  478. "MT not compatible with static alloc");
  479. break;
  480. case ZSTD_c_compressionLevel:
  481. case ZSTD_c_windowLog:
  482. case ZSTD_c_hashLog:
  483. case ZSTD_c_chainLog:
  484. case ZSTD_c_searchLog:
  485. case ZSTD_c_minMatch:
  486. case ZSTD_c_targetLength:
  487. case ZSTD_c_strategy:
  488. case ZSTD_c_ldmHashRateLog:
  489. case ZSTD_c_format:
  490. case ZSTD_c_contentSizeFlag:
  491. case ZSTD_c_checksumFlag:
  492. case ZSTD_c_dictIDFlag:
  493. case ZSTD_c_forceMaxWindow:
  494. case ZSTD_c_forceAttachDict:
  495. case ZSTD_c_literalCompressionMode:
  496. case ZSTD_c_jobSize:
  497. case ZSTD_c_overlapLog:
  498. case ZSTD_c_rsyncable:
  499. case ZSTD_c_enableDedicatedDictSearch:
  500. case ZSTD_c_enableLongDistanceMatching:
  501. case ZSTD_c_ldmHashLog:
  502. case ZSTD_c_ldmMinMatch:
  503. case ZSTD_c_ldmBucketSizeLog:
  504. case ZSTD_c_targetCBlockSize:
  505. case ZSTD_c_srcSizeHint:
  506. case ZSTD_c_stableInBuffer:
  507. case ZSTD_c_stableOutBuffer:
  508. case ZSTD_c_blockDelimiters:
  509. case ZSTD_c_validateSequences:
  510. break;
  511. default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
  512. }
  513. return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
  514. }
  515. size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
  516. ZSTD_cParameter param, int value)
  517. {
  518. DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
  519. switch(param)
  520. {
  521. case ZSTD_c_format :
  522. BOUNDCHECK(ZSTD_c_format, value);
  523. CCtxParams->format = (ZSTD_format_e)value;
  524. return (size_t)CCtxParams->format;
  525. case ZSTD_c_compressionLevel : {
  526. FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
  527. if (value == 0)
  528. CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
  529. else
  530. CCtxParams->compressionLevel = value;
  531. if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
  532. return 0; /* return type (size_t) cannot represent negative values */
  533. }
  534. case ZSTD_c_windowLog :
  535. if (value!=0) /* 0 => use default */
  536. BOUNDCHECK(ZSTD_c_windowLog, value);
  537. CCtxParams->cParams.windowLog = (U32)value;
  538. return CCtxParams->cParams.windowLog;
  539. case ZSTD_c_hashLog :
  540. if (value!=0) /* 0 => use default */
  541. BOUNDCHECK(ZSTD_c_hashLog, value);
  542. CCtxParams->cParams.hashLog = (U32)value;
  543. return CCtxParams->cParams.hashLog;
  544. case ZSTD_c_chainLog :
  545. if (value!=0) /* 0 => use default */
  546. BOUNDCHECK(ZSTD_c_chainLog, value);
  547. CCtxParams->cParams.chainLog = (U32)value;
  548. return CCtxParams->cParams.chainLog;
  549. case ZSTD_c_searchLog :
  550. if (value!=0) /* 0 => use default */
  551. BOUNDCHECK(ZSTD_c_searchLog, value);
  552. CCtxParams->cParams.searchLog = (U32)value;
  553. return (size_t)value;
  554. case ZSTD_c_minMatch :
  555. if (value!=0) /* 0 => use default */
  556. BOUNDCHECK(ZSTD_c_minMatch, value);
  557. CCtxParams->cParams.minMatch = value;
  558. return CCtxParams->cParams.minMatch;
  559. case ZSTD_c_targetLength :
  560. BOUNDCHECK(ZSTD_c_targetLength, value);
  561. CCtxParams->cParams.targetLength = value;
  562. return CCtxParams->cParams.targetLength;
  563. case ZSTD_c_strategy :
  564. if (value!=0) /* 0 => use default */
  565. BOUNDCHECK(ZSTD_c_strategy, value);
  566. CCtxParams->cParams.strategy = (ZSTD_strategy)value;
  567. return (size_t)CCtxParams->cParams.strategy;
  568. case ZSTD_c_contentSizeFlag :
  569. /* Content size written in frame header _when known_ (default:1) */
  570. DEBUGLOG(4, "set content size flag = %u", (value!=0));
  571. CCtxParams->fParams.contentSizeFlag = value != 0;
  572. return CCtxParams->fParams.contentSizeFlag;
  573. case ZSTD_c_checksumFlag :
  574. /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
  575. CCtxParams->fParams.checksumFlag = value != 0;
  576. return CCtxParams->fParams.checksumFlag;
  577. case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
  578. DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
  579. CCtxParams->fParams.noDictIDFlag = !value;
  580. return !CCtxParams->fParams.noDictIDFlag;
  581. case ZSTD_c_forceMaxWindow :
  582. CCtxParams->forceWindow = (value != 0);
  583. return CCtxParams->forceWindow;
  584. case ZSTD_c_forceAttachDict : {
  585. const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
  586. BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
  587. CCtxParams->attachDictPref = pref;
  588. return CCtxParams->attachDictPref;
  589. }
  590. case ZSTD_c_literalCompressionMode : {
  591. const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
  592. BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
  593. CCtxParams->literalCompressionMode = lcm;
  594. return CCtxParams->literalCompressionMode;
  595. }
  596. case ZSTD_c_nbWorkers :
  597. #ifndef ZSTD_MULTITHREAD
  598. RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
  599. return 0;
  600. #else
  601. FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
  602. CCtxParams->nbWorkers = value;
  603. return CCtxParams->nbWorkers;
  604. #endif
  605. case ZSTD_c_jobSize :
  606. #ifndef ZSTD_MULTITHREAD
  607. RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
  608. return 0;
  609. #else
  610. /* Adjust to the minimum non-default value. */
  611. if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
  612. value = ZSTDMT_JOBSIZE_MIN;
  613. FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
  614. assert(value >= 0);
  615. CCtxParams->jobSize = value;
  616. return CCtxParams->jobSize;
  617. #endif
  618. case ZSTD_c_overlapLog :
  619. #ifndef ZSTD_MULTITHREAD
  620. RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
  621. return 0;
  622. #else
  623. FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
  624. CCtxParams->overlapLog = value;
  625. return CCtxParams->overlapLog;
  626. #endif
  627. case ZSTD_c_rsyncable :
  628. #ifndef ZSTD_MULTITHREAD
  629. RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
  630. return 0;
  631. #else
  632. FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
  633. CCtxParams->rsyncable = value;
  634. return CCtxParams->rsyncable;
  635. #endif
  636. case ZSTD_c_enableDedicatedDictSearch :
  637. CCtxParams->enableDedicatedDictSearch = (value!=0);
  638. return CCtxParams->enableDedicatedDictSearch;
  639. case ZSTD_c_enableLongDistanceMatching :
  640. CCtxParams->ldmParams.enableLdm = (value!=0);
  641. return CCtxParams->ldmParams.enableLdm;
  642. case ZSTD_c_ldmHashLog :
  643. if (value!=0) /* 0 ==> auto */
  644. BOUNDCHECK(ZSTD_c_ldmHashLog, value);
  645. CCtxParams->ldmParams.hashLog = value;
  646. return CCtxParams->ldmParams.hashLog;
  647. case ZSTD_c_ldmMinMatch :
  648. if (value!=0) /* 0 ==> default */
  649. BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
  650. CCtxParams->ldmParams.minMatchLength = value;
  651. return CCtxParams->ldmParams.minMatchLength;
  652. case ZSTD_c_ldmBucketSizeLog :
  653. if (value!=0) /* 0 ==> default */
  654. BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
  655. CCtxParams->ldmParams.bucketSizeLog = value;
  656. return CCtxParams->ldmParams.bucketSizeLog;
  657. case ZSTD_c_ldmHashRateLog :
  658. RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
  659. parameter_outOfBound, "Param out of bounds!");
  660. CCtxParams->ldmParams.hashRateLog = value;
  661. return CCtxParams->ldmParams.hashRateLog;
  662. case ZSTD_c_targetCBlockSize :
  663. if (value!=0) /* 0 ==> default */
  664. BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
  665. CCtxParams->targetCBlockSize = value;
  666. return CCtxParams->targetCBlockSize;
  667. case ZSTD_c_srcSizeHint :
  668. if (value!=0) /* 0 ==> default */
  669. BOUNDCHECK(ZSTD_c_srcSizeHint, value);
  670. CCtxParams->srcSizeHint = value;
  671. return CCtxParams->srcSizeHint;
  672. case ZSTD_c_stableInBuffer:
  673. BOUNDCHECK(ZSTD_c_stableInBuffer, value);
  674. CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
  675. return CCtxParams->inBufferMode;
  676. case ZSTD_c_stableOutBuffer:
  677. BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
  678. CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
  679. return CCtxParams->outBufferMode;
  680. case ZSTD_c_blockDelimiters:
  681. BOUNDCHECK(ZSTD_c_blockDelimiters, value);
  682. CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
  683. return CCtxParams->blockDelimiters;
  684. case ZSTD_c_validateSequences:
  685. BOUNDCHECK(ZSTD_c_validateSequences, value);
  686. CCtxParams->validateSequences = value;
  687. return CCtxParams->validateSequences;
  688. default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
  689. }
  690. }
  691. size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
  692. {
  693. return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
  694. }
/** ZSTD_CCtxParams_getParameter() :
 *  Reads the current value of `param` out of `CCtxParams` into `*value`.
 *  @return : 0 on success, or an error code if `param` is unknown or
 *            unavailable in this build (MT-only params in single-thread builds). */
size_t ZSTD_CCtxParams_getParameter(
        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
{
    switch(param)
    {
    case ZSTD_c_format :
        *value = CCtxParams->format;
        break;
    case ZSTD_c_compressionLevel :
        *value = CCtxParams->compressionLevel;
        break;
    case ZSTD_c_windowLog :
        *value = (int)CCtxParams->cParams.windowLog;
        break;
    case ZSTD_c_hashLog :
        *value = (int)CCtxParams->cParams.hashLog;
        break;
    case ZSTD_c_chainLog :
        *value = (int)CCtxParams->cParams.chainLog;
        break;
    case ZSTD_c_searchLog :
        *value = CCtxParams->cParams.searchLog;
        break;
    case ZSTD_c_minMatch :
        *value = CCtxParams->cParams.minMatch;
        break;
    case ZSTD_c_targetLength :
        *value = CCtxParams->cParams.targetLength;
        break;
    case ZSTD_c_strategy :
        *value = (unsigned)CCtxParams->cParams.strategy;
        break;
    case ZSTD_c_contentSizeFlag :
        *value = CCtxParams->fParams.contentSizeFlag;
        break;
    case ZSTD_c_checksumFlag :
        *value = CCtxParams->fParams.checksumFlag;
        break;
    case ZSTD_c_dictIDFlag :
        /* stored inverted (noDictIDFlag); report it back as dictIDFlag */
        *value = !CCtxParams->fParams.noDictIDFlag;
        break;
    case ZSTD_c_forceMaxWindow :
        *value = CCtxParams->forceWindow;
        break;
    case ZSTD_c_forceAttachDict :
        *value = CCtxParams->attachDictPref;
        break;
    case ZSTD_c_literalCompressionMode :
        *value = CCtxParams->literalCompressionMode;
        break;
    case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
        /* single-thread builds can never have stored a non-zero value */
        assert(CCtxParams->nbWorkers == 0);
#endif
        *value = CCtxParams->nbWorkers;
        break;
    case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
        assert(CCtxParams->jobSize <= INT_MAX);
        *value = (int)CCtxParams->jobSize;
        break;
#endif
    case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
        *value = CCtxParams->overlapLog;
        break;
#endif
    case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
        *value = CCtxParams->rsyncable;
        break;
#endif
    case ZSTD_c_enableDedicatedDictSearch :
        *value = CCtxParams->enableDedicatedDictSearch;
        break;
    case ZSTD_c_enableLongDistanceMatching :
        *value = CCtxParams->ldmParams.enableLdm;
        break;
    case ZSTD_c_ldmHashLog :
        *value = CCtxParams->ldmParams.hashLog;
        break;
    case ZSTD_c_ldmMinMatch :
        *value = CCtxParams->ldmParams.minMatchLength;
        break;
    case ZSTD_c_ldmBucketSizeLog :
        *value = CCtxParams->ldmParams.bucketSizeLog;
        break;
    case ZSTD_c_ldmHashRateLog :
        *value = CCtxParams->ldmParams.hashRateLog;
        break;
    case ZSTD_c_targetCBlockSize :
        *value = (int)CCtxParams->targetCBlockSize;
        break;
    case ZSTD_c_srcSizeHint :
        *value = (int)CCtxParams->srcSizeHint;
        break;
    case ZSTD_c_stableInBuffer :
        *value = (int)CCtxParams->inBufferMode;
        break;
    case ZSTD_c_stableOutBuffer :
        *value = (int)CCtxParams->outBufferMode;
        break;
    case ZSTD_c_blockDelimiters :
        *value = (int)CCtxParams->blockDelimiters;
        break;
    case ZSTD_c_validateSequences :
        *value = (int)CCtxParams->validateSequences;
        break;
    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
    return 0;
}
/** ZSTD_CCtx_setParametersUsingCCtxParams() :
 *  just applies `params` into `cctx`
 *  no action is performed, parameters are merely stored.
 *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
 *  This is possible even if a compression is ongoing.
 *  In which case, new parameters will be applied on the fly, starting with next compression job.
 *  @return : 0 on success, or an error code if the context is mid-stream
 *            or currently holds a cdict (whose parameters must be inherited). */
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "The context is in the wrong stage!");
    RETURN_ERROR_IF(cctx->cdict, stage_wrong,
                    "Can't override parameters with cdict attached (some must "
                    "be inherited from the cdict).");

    cctx->requestedParams = *params;
    return 0;
}
/* ZSTD_CCtx_setPledgedSrcSize() :
 * record the total source size the caller promises to provide.
 * Stored +1, so that an internal value of 0 means "unknown"
 * (note : pledging ZSTD_CONTENTSIZE_UNKNOWN wraps back to 0, as intended). */
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't set pledgedSrcSize when not in init stage.");
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    return 0;
}
/* forward declarations : dedicated-dict-search helpers are defined later
 * in this file, but are needed by the dictionary-loading logic below. */
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
        int const compressionLevel,
        size_t const dictSize);
static int ZSTD_dedicatedDictSearch_isSupported(
        const ZSTD_compressionParameters* cParams);
static void ZSTD_dedicatedDictSearch_revertCParams(
        ZSTD_compressionParameters* cParams);
/**
 * Initializes the local dict using the requested parameters.
 * NOTE: This does not use the pledged src size, because it may be used for more
 * than one compression.
 * @return : 0 on success (including the no-dictionary case),
 *           or memory_allocation if CDict creation fails.
 */
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
    ZSTD_localDict* const dl = &cctx->localDict;
    if (dl->dict == NULL) {
        /* No local dictionary. */
        assert(dl->dictBuffer == NULL);
        assert(dl->cdict == NULL);
        assert(dl->dictSize == 0);
        return 0;
    }
    if (dl->cdict != NULL) {
        assert(cctx->cdict == dl->cdict);
        /* Local dictionary already initialized. */
        return 0;
    }
    /* dictionary content present but not yet digested into a CDict :
     * build one now, by reference (content is owned by localDict). */
    assert(dl->dictSize > 0);
    assert(cctx->cdict == NULL);
    assert(cctx->prefixDict.dict == NULL);

    dl->cdict = ZSTD_createCDict_advanced2(
            dl->dict,
            dl->dictSize,
            ZSTD_dlm_byRef,
            dl->dictContentType,
            &cctx->requestedParams,
            cctx->customMem);
    RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
    cctx->cdict = dl->cdict;
    return 0;
}
  881. size_t ZSTD_CCtx_loadDictionary_advanced(
  882. ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
  883. ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
  884. {
  885. RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
  886. "Can't load a dictionary when ctx is not in init stage.");
  887. DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
  888. ZSTD_clearAllDicts(cctx); /* in case one already exists */
  889. if (dict == NULL || dictSize == 0) /* no dictionary mode */
  890. return 0;
  891. if (dictLoadMethod == ZSTD_dlm_byRef) {
  892. cctx->localDict.dict = dict;
  893. } else {
  894. void* dictBuffer;
  895. RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
  896. "no malloc for static CCtx");
  897. dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
  898. RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
  899. ZSTD_memcpy(dictBuffer, dict, dictSize);
  900. cctx->localDict.dictBuffer = dictBuffer;
  901. cctx->localDict.dict = dictBuffer;
  902. }
  903. cctx->localDict.dictSize = dictSize;
  904. cctx->localDict.dictContentType = dictContentType;
  905. return 0;
  906. }
/* convenience wrapper : load dictionary by reference, auto-detect content type */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}
/* convenience wrapper : load dictionary by copy, auto-detect content type */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}
/* ZSTD_CCtx_refCDict() :
 * reference an externally-owned CDict (not copied, caller keeps ownership);
 * cdict==NULL simply clears any previously attached dictionary. */
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a dict when ctx not in init stage.");
    /* Free the existing local cdict (if any) to save memory. */
    ZSTD_clearAllDicts(cctx);
    cctx->cdict = cdict;
    return 0;
}
/* ZSTD_CCtx_refThreadPool() :
 * reference an externally-owned thread pool (caller keeps ownership). */
size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a pool when ctx not in init stage.");
    cctx->pool = pool;
    return 0;
}
/* convenience wrapper : reference a raw-content prefix */
size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
}
  938. size_t ZSTD_CCtx_refPrefix_advanced(
  939. ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
  940. {
  941. RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
  942. "Can't ref a prefix when ctx not in init stage.");
  943. ZSTD_clearAllDicts(cctx);
  944. if (prefix != NULL && prefixSize > 0) {
  945. cctx->prefixDict.dict = prefix;
  946. cctx->prefixDict.dictSize = prefixSize;
  947. cctx->prefixDict.dictContentType = dictContentType;
  948. }
  949. return 0;
  950. }
  951. /*! ZSTD_CCtx_reset() :
  952. * Also dumps dictionary */
  953. size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
  954. {
  955. if ( (reset == ZSTD_reset_session_only)
  956. || (reset == ZSTD_reset_session_and_parameters) ) {
  957. cctx->streamStage = zcss_init;
  958. cctx->pledgedSrcSizePlusOne = 0;
  959. }
  960. if ( (reset == ZSTD_reset_parameters)
  961. || (reset == ZSTD_reset_session_and_parameters) ) {
  962. RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
  963. "Can't reset parameters only when not in init stage.");
  964. ZSTD_clearAllDicts(cctx);
  965. return ZSTD_CCtxParams_reset(&cctx->requestedParams);
  966. }
  967. return 0;
  968. }
/** ZSTD_checkCParams() :
    control CParam values remain within authorized range.
    note : checks are performed in this fixed order; when several fields are
           out of range, the first failing check determines the error returned.
    @return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
    /* strategy is an enum : passed without cast */
    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
    return 0;
}
  983. /** ZSTD_clampCParams() :
  984. * make CParam values within valid range.
  985. * @return : valid CParams */
  986. static ZSTD_compressionParameters
  987. ZSTD_clampCParams(ZSTD_compressionParameters cParams)
  988. {
  989. # define CLAMP_TYPE(cParam, val, type) { \
  990. ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
  991. if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
  992. else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
  993. }
  994. # define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
  995. CLAMP(ZSTD_c_windowLog, cParams.windowLog);
  996. CLAMP(ZSTD_c_chainLog, cParams.chainLog);
  997. CLAMP(ZSTD_c_hashLog, cParams.hashLog);
  998. CLAMP(ZSTD_c_searchLog, cParams.searchLog);
  999. CLAMP(ZSTD_c_minMatch, cParams.minMatch);
  1000. CLAMP(ZSTD_c_targetLength,cParams.targetLength);
  1001. CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
  1002. return cParams;
  1003. }
  1004. /** ZSTD_cycleLog() :
  1005. * condition for correct operation : hashLog > 1 */
  1006. U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
  1007. {
  1008. U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
  1009. return hashLog - btScale;
  1010. }
/** ZSTD_dictAndWindowLog() :
 * Returns an adjusted window log that is large enough to fit the source and the dictionary.
 * The zstd format says that the entire dictionary is valid if one byte of the dictionary
 * is within the window. So the hashLog and chainLog should be large enough to reference both
 * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
 * the hashLog and windowLog.
 * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
 */
static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
{
    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
    /* No dictionary ==> No change */
    if (dictSize == 0) {
        return windowLog;
    }
    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
    {
        U64 const windowSize = 1ULL << windowLog;
        U64 const dictAndWindowSize = dictSize + windowSize;

        /* If the window size is already large enough to fit both the source and the dictionary
         * then just use the window size. Otherwise adjust so that it fits the dictionary and
         * the window.
         */
        if (windowSize >= dictSize + srcSize) {
            return windowLog; /* Window size large enough already */
        } else if (dictAndWindowSize >= maxWindowSize) {
            return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
        } else  {
            /* smallest power-of-2 window that covers dict + current window */
            return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
        }
    }
}
/** ZSTD_adjustCParams_internal() :
 *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
 *  mostly downsize to reduce memory consumption and initialization latency.
 *  `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
 *  `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
 *  note : `srcSize==0` means 0!
 *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
                            unsigned long long srcSize,
                            size_t dictSize,
                            ZSTD_cParamMode_e mode)
{
    const U64 minSrcSize = 513; /* (1<<9) + 1 */
    const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
    assert(ZSTD_checkCParams(cPar)==0);

    /* unknown source size with a dictionary : assume a small input */
    if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
        srcSize = minSrcSize;

    switch (mode) {
    case ZSTD_cpm_noAttachDict:
    case ZSTD_cpm_unknown:
    case ZSTD_cpm_createCDict:
        break;
    case ZSTD_cpm_attachDict:
        /* attached dict lives in its own context : don't count its size here */
        dictSize = 0;
        break;
    default:
        assert(0);
        break;
    }

    /* resize windowLog if input is small enough, to use less memory */
    if ( (srcSize < maxWindowResize)
      && (dictSize < maxWindowResize) )  {
        U32 const tSize = (U32)(srcSize + dictSize);
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
                            ZSTD_highbit32(tSize-1) + 1;
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
    }
    /* shrink hashLog/chainLog so tables don't exceed what the window (plus
     * dictionary, see ZSTD_dictAndWindowLog()) can actually reference */
    {   U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
        U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
        if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
        if (cycleLog > dictAndWindowLog)
            cPar.chainLog -= (cycleLog - dictAndWindowLog);
    }
    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */
    return cPar;
}
  1093. ZSTD_compressionParameters
  1094. ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
  1095. unsigned long long srcSize,
  1096. size_t dictSize)
  1097. {
  1098. cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
  1099. if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
  1100. return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
  1101. }
  1102. static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
  1103. static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
  1104. static void ZSTD_overrideCParams(
  1105. ZSTD_compressionParameters* cParams,
  1106. const ZSTD_compressionParameters* overrides)
  1107. {
  1108. if (overrides->windowLog) cParams->windowLog = overrides->windowLog;
  1109. if (overrides->hashLog) cParams->hashLog = overrides->hashLog;
  1110. if (overrides->chainLog) cParams->chainLog = overrides->chainLog;
  1111. if (overrides->searchLog) cParams->searchLog = overrides->searchLog;
  1112. if (overrides->minMatch) cParams->minMatch = overrides->minMatch;
  1113. if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
  1114. if (overrides->strategy) cParams->strategy = overrides->strategy;
  1115. }
/* ZSTD_getCParamsFromCCtxParams() :
 * Derive the effective compression parameters from a full CCtx parameter set,
 * for a given (srcSizeHint, dictSize) pair.
 * Application order matters : level-based defaults first, then the LDM default
 * window, then explicit per-field overrides, and finally size-based adaptation. */
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
    ZSTD_compressionParameters cParams;
    /* a user-provided srcSizeHint substitutes for an unknown content size */
    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
        srcSizeHint = CCtxParams->srcSizeHint;
    }
    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
    /* long-distance matching requires a large window by default */
    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
    /* explicit (non-zero) per-field settings take precedence */
    ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
    assert(!ZSTD_checkCParams(cParams));
    /* srcSizeHint == 0 means 0 */
    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
}
/* ZSTD_sizeof_matchState() :
 * Compute the workspace bytes required by a matchState built from `cParams`.
 * `forCCtx` : 1 when sizing a compression context, 0 when sizing a CDict;
 *  only a CCtx gets the hash3 table and the optimal-parser buffers.
 *  NOTE(review): this must stay in sync with the allocations performed in
 *  ZSTD_reset_matchState(). */
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
                       const U32 forCCtx)
{
    /* ZSTD_fast uses no chain table */
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    /* hash3 table only exists for a CCtx using minMatch==3 */
    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
     * surrounded by redzones in ASAN. */
    size_t const tableSpace = chainSize * sizeof(U32)
                            + hSize * sizeof(U32)
                            + h3Size * sizeof(U32);
    /* buffers needed by the optimal parser (strategies btopt and above) */
    size_t const optPotentialSpace =
        ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
      + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
      + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
      + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
                          ? optPotentialSpace
                          : 0;
    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
                (U32)chainSize, (U32)hSize, (U32)h3Size);
    return tableSpace + optSpace;
}
/* ZSTD_estimateCCtxSize_usingCCtxParams_internal() :
 * Sum up the workspace bytes required by every component of a CCtx,
 * for the given parameters and buffer sizes.
 * `isStatic` : when non-zero, the ZSTD_CCtx object itself also lives
 *  inside the workspace and must be accounted for.
 *  NOTE(review): must mirror the allocations in ZSTD_resetCCtx_internal(). */
static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
        const ZSTD_compressionParameters* cParams,
        const ldmParams_t* ldmParams,
        const int isStatic,
        const size_t buffInSize,
        const size_t buffOutSize,
        const U64 pledgedSrcSize)
{
    /* window never exceeds pledged source size, and is never 0 */
    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
    /* minimum match length bounds the max number of sequences per block */
    U32    const divider = (cParams->minMatch==3) ? 3 : 4;
    size_t const maxNbSeq = blockSize / divider;
    /* literals buffer + sequence array + ll/ml/of code arrays */
    size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
                            + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
                            + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
    size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
    /* prev + next compressed block states */
    size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
    size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1);

    size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
    size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
    size_t const ldmSeqSpace = ldmParams->enableLdm ?
        ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;

    size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
                             + ZSTD_cwksp_alloc_size(buffOutSize);

    size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;

    size_t const neededSpace =
        cctxSpace +
        entropySpace +
        blockStateSpace +
        ldmSpace +
        ldmSeqSpace +
        matchStateSize +
        tokenSpace +
        bufferSpace;

    DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
    return neededSpace;
}
  1194. size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
  1195. {
  1196. ZSTD_compressionParameters const cParams =
  1197. ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
  1198. RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
  1199. /* estimateCCtxSize is for one-shot compression. So no buffers should
  1200. * be needed. However, we still allocate two 0-sized buffers, which can
  1201. * take space under ASAN. */
  1202. return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
  1203. &cParams, &params->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
  1204. }
  1205. size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
  1206. {
  1207. ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
  1208. return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
  1209. }
  1210. static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
  1211. {
  1212. ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
  1213. return ZSTD_estimateCCtxSize_usingCParams(cParams);
  1214. }
  1215. size_t ZSTD_estimateCCtxSize(int compressionLevel)
  1216. {
  1217. int level;
  1218. size_t memBudget = 0;
  1219. for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
  1220. size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
  1221. if (newMB > memBudget) memBudget = newMB;
  1222. }
  1223. return memBudget;
  1224. }
  1225. size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
  1226. {
  1227. RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
  1228. { ZSTD_compressionParameters const cParams =
  1229. ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
  1230. size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
  1231. size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
  1232. ? ((size_t)1 << cParams.windowLog) + blockSize
  1233. : 0;
  1234. size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
  1235. ? ZSTD_compressBound(blockSize) + 1
  1236. : 0;
  1237. return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
  1238. &cParams, &params->ldmParams, 1, inBuffSize, outBuffSize,
  1239. ZSTD_CONTENTSIZE_UNKNOWN);
  1240. }
  1241. }
  1242. size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
  1243. {
  1244. ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
  1245. return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
  1246. }
  1247. static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
  1248. {
  1249. ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
  1250. return ZSTD_estimateCStreamSize_usingCParams(cParams);
  1251. }
  1252. size_t ZSTD_estimateCStreamSize(int compressionLevel)
  1253. {
  1254. int level;
  1255. size_t memBudget = 0;
  1256. for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
  1257. size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
  1258. if (newMB > memBudget) memBudget = newMB;
  1259. }
  1260. return memBudget;
  1261. }
/* ZSTD_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads (non-blocking mode).
 */
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    /* multithreaded progression is tracked per-job by ZSTDMT */
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_getFrameProgression(cctx->mtctx);
    }
#endif
    {   ZSTD_frameProgression fp;
        /* bytes sitting in the input buffer, ingested but not yet compressed */
        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
                                cctx->inBuffPos - cctx->inToCompress;
        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
        fp.ingested = cctx->consumedSrcSize + buffered;
        fp.consumed = cctx->consumedSrcSize;
        fp.produced = cctx->producedCSize;
        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
        /* no jobs / workers exist in single-threaded mode */
        fp.currentJobID = 0;
        fp.nbActiveWorkers = 0;
        return fp;
}   }
  1286. /*! ZSTD_toFlushNow()
  1287. * Only useful for multithreading scenarios currently (nbWorkers >= 1).
  1288. */
  1289. size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
  1290. {
  1291. #ifdef ZSTD_MULTITHREAD
  1292. if (cctx->appliedParams.nbWorkers > 0) {
  1293. return ZSTDMT_toFlushNow(cctx->mtctx);
  1294. }
  1295. #endif
  1296. (void)cctx;
  1297. return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
  1298. }
  1299. static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
  1300. ZSTD_compressionParameters cParams2)
  1301. {
  1302. (void)cParams1;
  1303. (void)cParams2;
  1304. assert(cParams1.windowLog == cParams2.windowLog);
  1305. assert(cParams1.chainLog == cParams2.chainLog);
  1306. assert(cParams1.hashLog == cParams2.hashLog);
  1307. assert(cParams1.searchLog == cParams2.searchLog);
  1308. assert(cParams1.minMatch == cParams2.minMatch);
  1309. assert(cParams1.targetLength == cParams2.targetLength);
  1310. assert(cParams1.strategy == cParams2.strategy);
  1311. }
  1312. void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
  1313. {
  1314. int i;
  1315. for (i = 0; i < ZSTD_REP_NUM; ++i)
  1316. bs->rep[i] = repStartValue[i];
  1317. bs->entropy.huf.repeatMode = HUF_repeat_none;
  1318. bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
  1319. bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
  1320. bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
  1321. }
/*! ZSTD_invalidateMatchState()
 *  Invalidate all the matches in the match finder tables.
 *  Requires nextSrc and base to be set (can be NULL).
 */
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
    ZSTD_window_clear(&ms->window);

    /* no positions below dictLimit remain indexable */
    ms->nextToUpdate = ms->window.dictLimit;
    ms->loadedDictEnd = 0;
    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
    /* drop any reference to an attached dictionary's matchState */
    ms->dictMatchState = NULL;
}
/**
 * Controls, for this matchState reset, whether the tables need to be cleared /
 * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
 * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
 * subsequent operation will overwrite the table space anyways (e.g., copying
 * the matchState contents in from a CDict).
 */
typedef enum {
    ZSTDcrp_makeClean,
    ZSTDcrp_leaveDirty
} ZSTD_compResetPolicy_e;

/**
 * Controls, for this matchState reset, whether indexing can continue where it
 * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
 * (ZSTDirp_reset).
 */
typedef enum {
    ZSTDirp_continue,
    ZSTDirp_reset
} ZSTD_indexResetPolicy_e;

/* Identifies what the matchState reset is for : a CCtx additionally gets
 * the hash3 table and optimal-parser buffers, a CDict does not. */
typedef enum {
    ZSTD_resetTarget_CDict,
    ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;
/* ZSTD_reset_matchState() :
 * Reserve the matchState's tables inside workspace `ws` and reset its
 * window / dictionary / opt-parser state.
 * @crp : whether tables must be zeroed now (makeClean), or may stay dirty
 *        because a subsequent copy will overwrite them (leaveDirty).
 * @forceResetIndex : whether indexing must restart from zero.
 * @forWho : CCtx additionally gets the hash3 table and optimal-parser buffers.
 * @return : 0 on success, or a memory_allocation error code. */
static size_t
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
                      ZSTD_cwksp* ws,
                const ZSTD_compressionParameters* cParams,
                const ZSTD_compResetPolicy_e crp,
                const ZSTD_indexResetPolicy_e forceResetIndex,
                const ZSTD_resetTarget_e forWho)
{
    /* ZSTD_fast uses no chain table */
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    /* hash3 table only exists for a CCtx using minMatch==3 */
    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;

    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
    if (forceResetIndex == ZSTDirp_reset) {
        ZSTD_window_init(&ms->window);
        ZSTD_cwksp_mark_tables_dirty(ws);
    }

    ms->hashLog3 = hashLog3;

    ZSTD_invalidateMatchState(ms);

    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */

    ZSTD_cwksp_clear_tables(ws);

    DEBUGLOG(5, "reserving table space");
    /* table Space */
    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
                    "failed a workspace allocation in ZSTD_reset_matchState");

    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
    if (crp!=ZSTDcrp_leaveDirty) {
        /* reset tables only */
        ZSTD_cwksp_clean_tables(ws);
    }

    /* opt parser space */
    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
        DEBUGLOG(4, "reserving optimal parser space");
        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
    }

    ms->cParams = *cParams;

    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
                    "failed a workspace allocation in ZSTD_reset_matchState");

    return 0;
}
/* ZSTD_indexTooCloseToMax() :
 * minor optimization : prefer memset() rather than reduceIndex()
 * which is measurably slow in some circumstances (reported for Visual Studio).
 * Works when re-using a context for a lot of smallish inputs :
 * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
 * memset() will be triggered before reduceIndex().
 */
#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
{
    /* (nextSrc - base) is the current max index within this window */
    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
}
  1418. /*! ZSTD_resetCCtx_internal() :
  1419. note : `params` are assumed fully validated at this stage */
  1420. static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
  1421. ZSTD_CCtx_params params,
  1422. U64 const pledgedSrcSize,
  1423. ZSTD_compResetPolicy_e const crp,
  1424. ZSTD_buffered_policy_e const zbuff)
  1425. {
  1426. ZSTD_cwksp* const ws = &zc->workspace;
  1427. DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
  1428. (U32)pledgedSrcSize, params.cParams.windowLog);
  1429. assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
  1430. zc->isFirstBlock = 1;
  1431. if (params.ldmParams.enableLdm) {
  1432. /* Adjust long distance matching parameters */
  1433. ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
  1434. assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
  1435. assert(params.ldmParams.hashRateLog < 32);
  1436. zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
  1437. }
  1438. { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
  1439. size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
  1440. U32 const divider = (params.cParams.minMatch==3) ? 3 : 4;
  1441. size_t const maxNbSeq = blockSize / divider;
  1442. size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered)
  1443. ? ZSTD_compressBound(blockSize) + 1
  1444. : 0;
  1445. size_t const buffInSize = (zbuff == ZSTDb_buffered && params.inBufferMode == ZSTD_bm_buffered)
  1446. ? windowSize + blockSize
  1447. : 0;
  1448. size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
  1449. int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
  1450. ZSTD_indexResetPolicy_e needsIndexReset =
  1451. (!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset;
  1452. size_t const neededSpace =
  1453. ZSTD_estimateCCtxSize_usingCCtxParams_internal(
  1454. &params.cParams, &params.ldmParams, zc->staticSize != 0,
  1455. buffInSize, buffOutSize, pledgedSrcSize);
  1456. FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
  1457. if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
  1458. /* Check if workspace is large enough, alloc a new one if needed */
  1459. {
  1460. int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
  1461. int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
  1462. DEBUGLOG(4, "Need %zu B workspace", neededSpace);
  1463. DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
  1464. if (workspaceTooSmall || workspaceWasteful) {
  1465. DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
  1466. ZSTD_cwksp_sizeof(ws) >> 10,
  1467. neededSpace >> 10);
  1468. RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
  1469. needsIndexReset = ZSTDirp_reset;
  1470. ZSTD_cwksp_free(ws, zc->customMem);
  1471. FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
  1472. DEBUGLOG(5, "reserving object space");
  1473. /* Statically sized space.
  1474. * entropyWorkspace never moves,
  1475. * though prev/next block swap places */
  1476. assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
  1477. zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
  1478. RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
  1479. zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
  1480. RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
  1481. zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
  1482. RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
  1483. } }
  1484. ZSTD_cwksp_clear(ws);
  1485. /* init params */
  1486. zc->appliedParams = params;
  1487. zc->blockState.matchState.cParams = params.cParams;
  1488. zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
  1489. zc->consumedSrcSize = 0;
  1490. zc->producedCSize = 0;
  1491. if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
  1492. zc->appliedParams.fParams.contentSizeFlag = 0;
  1493. DEBUGLOG(4, "pledged content size : %u ; flag : %u",
  1494. (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
  1495. zc->blockSize = blockSize;
  1496. XXH64_reset(&zc->xxhState, 0);
  1497. zc->stage = ZSTDcs_init;
  1498. zc->dictID = 0;
  1499. ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
  1500. /* ZSTD_wildcopy() is used to copy into the literals buffer,
  1501. * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
  1502. */
  1503. zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
  1504. zc->seqStore.maxNbLit = blockSize;
  1505. /* buffers */
  1506. zc->bufferedPolicy = zbuff;
  1507. zc->inBuffSize = buffInSize;
  1508. zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
  1509. zc->outBuffSize = buffOutSize;
  1510. zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
  1511. /* ldm bucketOffsets table */
  1512. if (params.ldmParams.enableLdm) {
  1513. /* TODO: avoid memset? */
  1514. size_t const ldmBucketSize =
  1515. ((size_t)1) << (params.ldmParams.hashLog -
  1516. params.ldmParams.bucketSizeLog);
  1517. zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize);
  1518. ZSTD_memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize);
  1519. }
  1520. /* sequences storage */
  1521. ZSTD_referenceExternalSequences(zc, NULL, 0);
  1522. zc->seqStore.maxNbSeq = maxNbSeq;
  1523. zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
  1524. zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
  1525. zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
  1526. zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
  1527. FORWARD_IF_ERROR(ZSTD_reset_matchState(
  1528. &zc->blockState.matchState,
  1529. ws,
  1530. &params.cParams,
  1531. crp,
  1532. needsIndexReset,
  1533. ZSTD_resetTarget_CCtx), "");
  1534. /* ldm hash table */
  1535. if (params.ldmParams.enableLdm) {
  1536. /* TODO: avoid memset? */
  1537. size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
  1538. zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
  1539. ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
  1540. zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
  1541. zc->maxNbLdmSequences = maxNbLdmSeq;
  1542. ZSTD_window_init(&zc->ldmState.window);
  1543. ZSTD_window_clear(&zc->ldmState.window);
  1544. zc->ldmState.loadedDictEnd = 0;
  1545. }
  1546. /* Due to alignment, when reusing a workspace, we can actually consume
  1547. * up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
  1548. */
  1549. assert(ZSTD_cwksp_used(ws) >= neededSpace &&
  1550. ZSTD_cwksp_used(ws) <= neededSpace + 3);
  1551. DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
  1552. zc->initialized = 1;
  1553. return 0;
  1554. }
  1555. }
  1556. /* ZSTD_invalidateRepCodes() :
  1557. * ensures next compression will not use repcodes from previous block.
  1558. * Note : only works with regular variant;
  1559. * do not use with extDict variant ! */
  1560. void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
  1561. int i;
  1562. for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
  1563. assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
  1564. }
/* These are the approximate sizes for each strategy past which copying the
 * dictionary tables into the working context is faster than using them
 * in-place.
 * Indexed by ZSTD_strategy (entry 0 corresponds to no strategy and is unused). */
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
    8 KB,  /* unused */
    8 KB,  /* ZSTD_fast */
    16 KB, /* ZSTD_dfast */
    32 KB, /* ZSTD_greedy */
    32 KB, /* ZSTD_lazy */
    32 KB, /* ZSTD_lazy2 */
    32 KB, /* ZSTD_btlazy2 */
    32 KB, /* ZSTD_btopt */
    8 KB,  /* ZSTD_btultra */
    8 KB   /* ZSTD_btultra2 */
};
  1581. static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
  1582. const ZSTD_CCtx_params* params,
  1583. U64 pledgedSrcSize)
  1584. {
  1585. size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
  1586. int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
  1587. return dedicatedDictSearch
  1588. || ( ( pledgedSrcSize <= cutoff
  1589. || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
  1590. || params->attachDictPref == ZSTD_dictForceAttach )
  1591. && params->attachDictPref != ZSTD_dictForceCopy
  1592. && !params->forceWindow ); /* dictMatchState isn't correctly
  1593. * handled in _enforceMaxDist */
  1594. }
/* ZSTD_resetCCtx_byAttachingCDict() :
 * Reset `cctx` for a new frame, referencing the CDict's tables in-place
 * (no copy). The working context keeps its own (resized) tables for new
 * input; dictionary matches are found through ms->dictMatchState. */
static size_t
ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
                        const ZSTD_CDict* cdict,
                        ZSTD_CCtx_params params,
                        U64 pledgedSrcSize,
                        ZSTD_buffered_policy_e zbuff)
{
    {
        ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
        unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Resize working context table params for input only, since the dict
         * has its own tables. */
        /* pledgedSrcSize == 0 means 0! */

        if (cdict->matchState.dedicatedDictSearch) {
            ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
        }

        params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
                                                     cdict->dictContentSize, ZSTD_cpm_attachDict);
        params.cParams.windowLog = windowLog;  /* preserve the caller-requested window */
        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                                 ZSTDcrp_makeClean, zbuff), "");
        assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
    }

    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
                                  - cdict->matchState.window.base);
        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
        if (cdictLen == 0) {
            /* don't even attach dictionaries with no contents */
            DEBUGLOG(4, "skipping attaching empty dictionary");
        } else {
            DEBUGLOG(4, "attaching dictionary into context");
            cctx->blockState.matchState.dictMatchState = &cdict->matchState;

            /* prep working match state so dict matches never have negative indices
             * when they are translated to the working context's index space. */
            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
                cctx->blockState.matchState.window.nextSrc =
                    cctx->blockState.matchState.window.base + cdictEnd;
                ZSTD_window_clear(&cctx->blockState.matchState.window);
            }
            /* loadedDictEnd is expressed within the referential of the active context */
            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
    }   }

    cctx->dictID = cdict->dictID;

    /* copy block state */
    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}
/* ZSTD_resetCCtx_byCopyingCDict() :
 * Reset `cctx` for a new frame, copying the CDict's hash/chain tables and
 * entropy state into the working context. Tables are reserved dirty, then
 * fully overwritten by the copies below. */
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            ZSTD_CCtx_params params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;

    assert(!cdict->matchState.dedicatedDictSearch);

    DEBUGLOG(4, "copying dictionary into context");

    {   unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Copy only compression parameters related to tables. */
        params.cParams = *cdict_cParams;
        params.cParams.windowLog = windowLog;
        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                                 ZSTDcrp_leaveDirty, zbuff), "");
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
    }

    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);

    /* copy tables */
    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;

        ZSTD_memcpy(cctx->blockState.matchState.hashTable,
               cdict->matchState.hashTable,
               hSize * sizeof(U32));
        ZSTD_memcpy(cctx->blockState.matchState.chainTable,
               cdict->matchState.chainTable,
               chainSize * sizeof(U32));
    }

    /* Zero the hashTable3, since the cdict never fills it */
    {   int const h3log = cctx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
        assert(cdict->matchState.hashLog3 == 0);
        ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);

    /* copy dictionary offsets */
    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }

    cctx->dictID = cdict->dictID;

    /* copy block state */
    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}
  1693. /* We have a choice between copying the dictionary context into the working
  1694. * context, or referencing the dictionary context from the working context
  1695. * in-place. We decide here which strategy to use. */
  1696. static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
  1697. const ZSTD_CDict* cdict,
  1698. const ZSTD_CCtx_params* params,
  1699. U64 pledgedSrcSize,
  1700. ZSTD_buffered_policy_e zbuff)
  1701. {
  1702. DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
  1703. (unsigned)pledgedSrcSize);
  1704. if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
  1705. return ZSTD_resetCCtx_byAttachingCDict(
  1706. cctx, cdict, *params, pledgedSrcSize, zbuff);
  1707. } else {
  1708. return ZSTD_resetCCtx_byCopyingCDict(
  1709. cctx, cdict, *params, pledgedSrcSize, zbuff);
  1710. }
  1711. }
/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
 * @param dstCCtx destination context, re-sized/reset to match srcCCtx's tables
 * @param srcCCtx source context; must still be in ZSTDcs_init stage
 * @param fParams frame parameters to apply to dstCCtx
 * @param pledgedSrcSize announced source size (forwarded to the reset)
 * @param zbuff buffering policy to apply to dstCCtx
 * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
                    "Can't copy a ctx that's not in init stage.");

    ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        params.fParams = fParams;
        /* Tables are left dirty here because they are fully overwritten
         * by the raw copies just below. */
        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
                                ZSTDcrp_leaveDirty, zbuff);
        /* Table geometry must match exactly for the raw memcpy's below to be valid. */
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);

    /* copy tables */
    {   /* fast strategy has no chain table */
        size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
        size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        int const h3log = srcCCtx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;

        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
               srcCCtx->blockState.matchState.hashTable,
               hSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
               srcCCtx->blockState.matchState.chainTable,
               chainSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
               srcCCtx->blockState.matchState.hashTable3,
               h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);

    /* copy dictionary offsets */
    {
        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;

    /* copy block state (repcodes + entropy tables used as reference by the next block) */
    ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}
  1771. /*! ZSTD_copyCCtx() :
  1772. * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
  1773. * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
  1774. * pledgedSrcSize==0 means "unknown".
  1775. * @return : 0, or an error code */
  1776. size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
  1777. {
  1778. ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
  1779. ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
  1780. ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
  1781. if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
  1782. fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
  1783. return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
  1784. fParams, pledgedSrcSize,
  1785. zbuff);
  1786. }
  1787. #define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
 *  reduce table indexes by `reducerValue`, or squash to zero.
 *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
 *  It must be set to a clear 0/1 value, to remove branch during inlining.
 *  Presume table size is a multiple of ZSTD_ROWSIZE
 *  to help auto-vectorization
 * @param table         table of U32 indexes to rescale, modified in place
 * @param size          number of entries; must be a multiple of ZSTD_ROWSIZE
 * @param reducerValue  amount subtracted from each index (saturating at 0)
 * @param preserveMark  compile-time 0/1: when 1, ZSTD_DUBT_UNSORTED_MARK
 *                      entries are first bumped so the subtraction below
 *                      restores the mark unchanged */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
{
    int const nbRows = (int)size / ZSTD_ROWSIZE;
    int cellNb = 0;
    int rowNb;
    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
    assert(size < (1U<<31));   /* can be casted to int */

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty.
     *
     * This function however is intended to operate on those dirty tables and
     * re-clean them. So when this function is used correctly, we can unpoison
     * the memory it operated on. This introduces a blind spot though, since
     * if we now try to operate on __actually__ poisoned memory, we will not
     * detect that. */
    __msan_unpoison(table, size * sizeof(U32));
#endif

    /* Fixed-width inner loop (ZSTD_ROWSIZE) is intentional: it helps the
     * compiler auto-vectorize. */
    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
        int column;
        for (column=0; column<ZSTD_ROWSIZE; column++) {
            if (preserveMark) {
                /* bump marked cells so that (bump - reducerValue) == mark */
                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
                table[cellNb] += adder;
            }
            /* saturating subtraction: indexes below reducerValue become 0 */
            if (table[cellNb] < reducerValue) table[cellNb] = 0;
            else table[cellNb] -= reducerValue;
            cellNb++;
    }   }
}
  1826. static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
  1827. {
  1828. ZSTD_reduceTable_internal(table, size, reducerValue, 0);
  1829. }
  1830. static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
  1831. {
  1832. ZSTD_reduceTable_internal(table, size, reducerValue, 1);
  1833. }
  1834. /*! ZSTD_reduceIndex() :
  1835. * rescale all indexes to avoid future overflow (indexes are U32) */
  1836. static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
  1837. {
  1838. { U32 const hSize = (U32)1 << params->cParams.hashLog;
  1839. ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
  1840. }
  1841. if (params->cParams.strategy != ZSTD_fast) {
  1842. U32 const chainSize = (U32)1 << params->cParams.chainLog;
  1843. if (params->cParams.strategy == ZSTD_btlazy2)
  1844. ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
  1845. else
  1846. ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
  1847. }
  1848. if (ms->hashLog3) {
  1849. U32 const h3Size = (U32)1 << ms->hashLog3;
  1850. ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
  1851. }
  1852. }
  1853. /*-*******************************************************
  1854. * Block entropic compression
  1855. *********************************************************/
  1856. /* See doc/zstd_compression_format.md for detailed format description */
  1857. void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
  1858. {
  1859. const seqDef* const sequences = seqStorePtr->sequencesStart;
  1860. BYTE* const llCodeTable = seqStorePtr->llCode;
  1861. BYTE* const ofCodeTable = seqStorePtr->ofCode;
  1862. BYTE* const mlCodeTable = seqStorePtr->mlCode;
  1863. U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
  1864. U32 u;
  1865. assert(nbSeq <= seqStorePtr->maxNbSeq);
  1866. for (u=0; u<nbSeq; u++) {
  1867. U32 const llv = sequences[u].litLength;
  1868. U32 const mlv = sequences[u].matchLength;
  1869. llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
  1870. ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
  1871. mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
  1872. }
  1873. if (seqStorePtr->longLengthID==1)
  1874. llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
  1875. if (seqStorePtr->longLengthID==2)
  1876. mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
  1877. }
  1878. /* ZSTD_useTargetCBlockSize():
  1879. * Returns if target compressed block size param is being used.
  1880. * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
  1881. * Returns 1 if true, 0 otherwise. */
  1882. static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
  1883. {
  1884. DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
  1885. return (cctxParams->targetCBlockSize != 0);
  1886. }
/* ZSTD_entropyCompressSequences_internal():
 * actually compresses both literals and sequences
 * Layout written to dst: [compressed literals][nbSeq header][seqHead byte]
 * [LL NCount][OF NCount][ML NCount][interleaved FSE bitstream].
 * @return : compressed size written to dst, or an error code.
 * Returning 0 is reserved for the "emit uncompressed block instead" signal
 * (see the zstd <= 1.3.4 workaround at the bottom). */
MEM_STATIC size_t
ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
                          const ZSTD_entropyCTables_t* prevEntropy,
                                ZSTD_entropyCTables_t* nextEntropy,
                          const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                void* entropyWorkspace, size_t entropyWkspSize,
                          const int bmi2)
{
    /* large windows may produce offsets too big for one bitstream flush cycle */
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    unsigned* count = (unsigned*)entropyWorkspace;
    /* CTables are built into nextEntropy so the following block can repeat them */
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    BYTE* seqHead;
    /* position of the most recent set_compressed NCount header, needed for
     * the zstd <= 1.3.4 decoder workaround at the end of this function */
    BYTE* lastNCount = NULL;

    /* carve the histogram (MaxSeq+1 counters) off the front of the workspace;
     * the remainder is handed to the entropy coders */
    entropyWorkspace = count + (MaxSeq + 1);
    entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);

    DEBUGLOG(4, "ZSTD_entropyCompressSequences_internal (nbSeq=%zu)", nbSeq);
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
                                    cctxParams->cParams.strategy,
                                    ZSTD_disableLiteralsCompression(cctxParams),
                                    op, dstCapacity,
                                    literals, litSize,
                                    entropyWorkspace, entropyWkspSize,
                                    bmi2);
        FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
        assert(cSize <= dstCapacity);
        op += cSize;
    }

    /* Sequences Header */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "Can't fit seq hdr in output buf!");
    /* nbSeq is serialized with a 1/2/3-byte variable-length encoding */
    if (nbSeq < 128) {
        *op++ = (BYTE)nbSeq;
    } else if (nbSeq < LONGNBSEQ) {
        op[0] = (BYTE)((nbSeq>>8) + 0x80);
        op[1] = (BYTE)nbSeq;
        op+=2;
    } else {
        op[0]=0xFF;
        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
        op+=3;
    }
    assert(op <= oend);
    if (nbSeq==0) {
        /* Copy the old tables over as if we repeated them */
        ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
        return (size_t)(op - ostart);
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;
    assert(op <= oend);

    /* convert length/distances into codes */
    ZSTD_seqToCodes(seqStorePtr);
    /* build CTable for Literal Lengths */
    {   unsigned max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        LLFSELog, prevEntropy->fse.litlengthCTable,
                                        LL_defaultNorm, LL_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
                count, max, llCodeTable, nbSeq,
                LL_defaultNorm, LL_defaultNormLog, MaxLL,
                prevEntropy->fse.litlengthCTable,
                sizeof(prevEntropy->fse.litlengthCTable),
                entropyWorkspace, entropyWkspSize);
            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
            if (LLtype == set_compressed)
                lastNCount = op;   /* an NCount header was just serialized here */
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for Offsets */
    {   unsigned max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(
            count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        OffFSELog, prevEntropy->fse.offcodeCTable,
                                        OF_defaultNorm, OF_defaultNormLog,
                                        defaultPolicy, strategy);
        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
                count, max, ofCodeTable, nbSeq,
                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                prevEntropy->fse.offcodeCTable,
                sizeof(prevEntropy->fse.offcodeCTable),
                entropyWorkspace, entropyWkspSize);
            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
            if (Offtype == set_compressed)
                lastNCount = op;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for MatchLengths */
    {   unsigned max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(
            count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
                                        ML_defaultNorm, ML_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
                count, max, mlCodeTable, nbSeq,
                ML_defaultNorm, ML_defaultNormLog, MaxML,
                prevEntropy->fse.matchlengthCTable,
                sizeof(prevEntropy->fse.matchlengthCTable),
                entropyWorkspace, entropyWkspSize);
            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
            if (MLtype == set_compressed)
                lastNCount = op;
            op += countSize;
            assert(op <= oend);
    }   }

    /* pack the three 2-bit encoding types into the seqHead flag byte */
    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, (size_t)(oend - op),
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
        op += bitstreamSize;
        assert(op <= oend);
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
        if (lastNCount && (op - lastNCount) < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(op - lastNCount == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
    }

    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
    return (size_t)(op - ostart);
}
  2070. MEM_STATIC size_t
  2071. ZSTD_entropyCompressSequences(seqStore_t* seqStorePtr,
  2072. const ZSTD_entropyCTables_t* prevEntropy,
  2073. ZSTD_entropyCTables_t* nextEntropy,
  2074. const ZSTD_CCtx_params* cctxParams,
  2075. void* dst, size_t dstCapacity,
  2076. size_t srcSize,
  2077. void* entropyWorkspace, size_t entropyWkspSize,
  2078. int bmi2)
  2079. {
  2080. size_t const cSize = ZSTD_entropyCompressSequences_internal(
  2081. seqStorePtr, prevEntropy, nextEntropy, cctxParams,
  2082. dst, dstCapacity,
  2083. entropyWorkspace, entropyWkspSize, bmi2);
  2084. if (cSize == 0) return 0;
  2085. /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
  2086. * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
  2087. */
  2088. if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
  2089. return 0; /* block not compressed */
  2090. FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSequences_internal failed");
  2091. /* Check compressibility */
  2092. { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
  2093. if (cSize >= maxCSize) return 0; /* block not compressed */
  2094. }
  2095. DEBUGLOG(4, "ZSTD_entropyCompressSequences() cSize: %zu\n", cSize);
  2096. return cSize;
  2097. }
/* ZSTD_selectBlockCompressor() :
 * Not static, but internal use only (used by long distance matcher)
 * assumption : strat is a valid strategy
 * Dispatch table indexed by [dictMode][strategy]. Rows follow the
 * ZSTD_dictMode_e order implied by the entry suffixes: plain, _extDict,
 * _dictMatchState, _dedicatedDictSearch. NULL entries are strategies the
 * dedicated-dict-search mode does not support (caught by the assert). */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
{
    static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
        /* no dictionary */
        { ZSTD_compressBlock_fast  /* default for 0 */,
          ZSTD_compressBlock_fast,
          ZSTD_compressBlock_doubleFast,
          ZSTD_compressBlock_greedy,
          ZSTD_compressBlock_lazy,
          ZSTD_compressBlock_lazy2,
          ZSTD_compressBlock_btlazy2,
          ZSTD_compressBlock_btopt,
          ZSTD_compressBlock_btultra,
          ZSTD_compressBlock_btultra2 },
        /* external dictionary (out-of-window content) */
        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
          ZSTD_compressBlock_fast_extDict,
          ZSTD_compressBlock_doubleFast_extDict,
          ZSTD_compressBlock_greedy_extDict,
          ZSTD_compressBlock_lazy_extDict,
          ZSTD_compressBlock_lazy2_extDict,
          ZSTD_compressBlock_btlazy2_extDict,
          ZSTD_compressBlock_btopt_extDict,
          ZSTD_compressBlock_btultra_extDict,
          ZSTD_compressBlock_btultra_extDict },
        /* attached dictionary match state */
        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
          ZSTD_compressBlock_fast_dictMatchState,
          ZSTD_compressBlock_doubleFast_dictMatchState,
          ZSTD_compressBlock_greedy_dictMatchState,
          ZSTD_compressBlock_lazy_dictMatchState,
          ZSTD_compressBlock_lazy2_dictMatchState,
          ZSTD_compressBlock_btlazy2_dictMatchState,
          ZSTD_compressBlock_btopt_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState },
        /* dedicated dictionary search: only greedy/lazy/lazy2 supported */
        { NULL  /* default for 0 */,
          NULL,
          NULL,
          ZSTD_compressBlock_greedy_dedicatedDictSearch,
          ZSTD_compressBlock_lazy_dedicatedDictSearch,
          ZSTD_compressBlock_lazy2_dedicatedDictSearch,
          NULL,
          NULL,
          NULL,
          NULL }
    };
    ZSTD_blockCompressor selectedCompressor;
    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);

    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
    assert(selectedCompressor != NULL);
    return selectedCompressor;
}
  2152. static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
  2153. const BYTE* anchor, size_t lastLLSize)
  2154. {
  2155. ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
  2156. seqStorePtr->lit += lastLLSize;
  2157. }
  2158. void ZSTD_resetSeqStore(seqStore_t* ssPtr)
  2159. {
  2160. ssPtr->lit = ssPtr->litStart;
  2161. ssPtr->sequences = ssPtr->sequencesStart;
  2162. ssPtr->longLengthID = 0;
  2163. }
  2164. typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
/* ZSTD_buildSeqStore() :
 * Fill zc->seqStore with the sequences for one block of `src`, choosing
 * between externally-provided sequences, long-distance matching, and the
 * regular block compressor selected by strategy/dictMode.
 * @return : ZSTDbss_compress when sequences were produced,
 *           ZSTDbss_noCompress when the block is too small to bother,
 *           or an error code (from LDM sequence generation). */
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    /* Assert that we have correctly flushed the ctx params into the ms's copy */
    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
        /* Even when skipping the block, any pending external sequences must be
         * advanced past it so they stay aligned with the input. */
        if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
            ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
        } else {
            ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
        }
        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
    }
    ZSTD_resetSeqStore(&(zc->seqStore));
    /* required for optimal parser to read stats from dictionary */
    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
    /* tell the optimal parser how we expect to compress literals */
    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
    /* a gap between an attached dict and the current window is not safe,
     * they must remain adjacent,
     * and when that stops being the case, the dict must be unset */
    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);

    /* limited update after a very long match */
    {   const BYTE* const base = ms->window.base;
        const BYTE* const istart = (const BYTE*)src;
        const U32 curr = (U32)(istart-base);
        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
        /* cap how far behind nextToUpdate may lag, bounding catch-up work */
        if (curr > ms->nextToUpdate + 384)
            ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
    }

    /* select and store sequences */
    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
        size_t lastLLSize;
        /* seed the next block's repcodes from the previous block's */
        {   int i;
            for (i = 0; i < ZSTD_REP_NUM; ++i)
                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
        }
        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
            /* caller-supplied sequences take priority; incompatible with LDM */
            assert(!zc->appliedParams.ldmParams.enableLdm);
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
        } else if (zc->appliedParams.ldmParams.enableLdm) {
            /* long-distance matching: generate raw sequences, then compress */
            rawSeqStore_t ldmSeqStore = kNullRawSeqStore;

            ldmSeqStore.seq = zc->ldmSequences;
            ldmSeqStore.capacity = zc->maxNbLdmSequences;
            /* Updates ldmSeqStore.size */
            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                               &zc->appliedParams.ldmParams,
                                               src, srcSize), "");
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&ldmSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(ldmSeqStore.pos == ldmSeqStore.size);
        } else {   /* not long range mode */
            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
            ms->ldmSeqStore = NULL;
            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
        }
        /* stash the trailing literals not covered by any sequence */
        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
    }   }
    return ZSTDbss_compress;
}
/* ZSTD_copyBlockSequences() :
 * Convert the current block's internal seqStore into public ZSTD_Sequence
 * entries, appended at zc->seqCollector.seqIndex. Internal offsets are
 * offCode+1 values; repcodes are resolved against a running copy of the
 * repcode history so the emitted offsets are raw match distances.
 * A final (off:0, ml:0) entry carries the block's trailing literals. */
static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
{
    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
    const seqDef* seqStoreSeqs = seqStore->sequencesStart;
    size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
    size_t literalsRead = 0;
    size_t lastLLSize;

    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
    size_t i;
    repcodes_t updatedRepcodes;

    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
    /* Ensure we have enough space for last literals "sequence" */
    assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
    ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
    for (i = 0; i < seqStoreSeqSize; ++i) {
        /* offsets > ZSTD_REP_NUM encode (rawOffset + ZSTD_REP_NUM) */
        U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
        outSeqs[i].litLength = seqStoreSeqs[i].litLength;
        /* stored matchLength omits MINMATCH; restore the true length */
        outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
        outSeqs[i].rep = 0;

        /* re-apply the long-length correction dropped during storage */
        if (i == seqStore->longLengthPos) {
            if (seqStore->longLengthID == 1) {
                outSeqs[i].litLength += 0x10000;
            } else if (seqStore->longLengthID == 2) {
                outSeqs[i].matchLength += 0x10000;
            }
        }

        if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
            /* Derive the correct offset corresponding to a repcode */
            outSeqs[i].rep = seqStoreSeqs[i].offset;
            if (outSeqs[i].litLength != 0) {
                rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
            } else {
                /* litLength == 0 shifts repcode meaning: code 3 maps to
                 * rep[0] - 1, codes 1/2 map to rep[1]/rep[2] */
                if (outSeqs[i].rep == 3) {
                    rawOffset = updatedRepcodes.rep[0] - 1;
                } else {
                    rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
                }
            }
        }
        outSeqs[i].offset = rawOffset;
        /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
           so we provide seqStoreSeqs[i].offset - 1 */
        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
                                         seqStoreSeqs[i].offset - 1,
                                         seqStoreSeqs[i].litLength == 0);
        literalsRead += outSeqs[i].litLength;
    }
    /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
     * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
     * for the block boundary, according to the API.
     */
    assert(seqStoreLiteralsSize >= literalsRead);
    lastLLSize = seqStoreLiteralsSize - literalsRead;
    outSeqs[i].litLength = (U32)lastLLSize;
    outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
    seqStoreSeqSize++;
    zc->seqCollector.seqIndex += seqStoreSeqSize;
}
  2297. size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
  2298. size_t outSeqsSize, const void* src, size_t srcSize)
  2299. {
  2300. const size_t dstCapacity = ZSTD_compressBound(srcSize);
  2301. void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
  2302. SeqCollector seqCollector;
  2303. RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
  2304. seqCollector.collectSequences = 1;
  2305. seqCollector.seqStart = outSeqs;
  2306. seqCollector.seqIndex = 0;
  2307. seqCollector.maxSequences = outSeqsSize;
  2308. zc->seqCollector = seqCollector;
  2309. ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
  2310. ZSTD_customFree(dst, ZSTD_defaultCMem);
  2311. return zc->seqCollector.seqIndex;
  2312. }
  2313. size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
  2314. size_t in = 0;
  2315. size_t out = 0;
  2316. for (; in < seqsSize; ++in) {
  2317. if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
  2318. if (in != seqsSize - 1) {
  2319. sequences[in+1].litLength += sequences[in].litLength;
  2320. }
  2321. } else {
  2322. sequences[out] = sequences[in];
  2323. ++out;
  2324. }
  2325. }
  2326. return out;
  2327. }
  2328. /* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. */
  2329. static int ZSTD_isRLE(const BYTE* src, size_t length) {
  2330. const BYTE* ip = src;
  2331. const BYTE value = ip[0];
  2332. const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
  2333. const size_t unrollSize = sizeof(size_t) * 4;
  2334. const size_t unrollMask = unrollSize - 1;
  2335. const size_t prefixLength = length & unrollMask;
  2336. size_t i;
  2337. size_t u;
  2338. if (length == 1) return 1;
  2339. /* Check if prefix is RLE first before using unrolled loop */
  2340. if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
  2341. return 0;
  2342. }
  2343. for (i = prefixLength; i != length; i += unrollSize) {
  2344. for (u = 0; u < unrollSize; u += sizeof(size_t)) {
  2345. if (MEM_readST(ip + i + u) != valueST) {
  2346. return 0;
  2347. }
  2348. }
  2349. }
  2350. return 1;
  2351. }
  2352. /* Returns true if the given block may be RLE.
  2353. * This is just a heuristic based on the compressibility.
  2354. * It may return both false positives and false negatives.
  2355. */
  2356. static int ZSTD_maybeRLE(seqStore_t const* seqStore)
  2357. {
  2358. size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
  2359. size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
  2360. return nbSeqs < 4 && nbLits < 10;
  2361. }
  2362. static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
  2363. {
  2364. ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
  2365. zc->blockState.prevCBlock = zc->blockState.nextCBlock;
  2366. zc->blockState.nextCBlock = tmp;
  2367. }
  2368. static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
  2369. void* dst, size_t dstCapacity,
  2370. const void* src, size_t srcSize, U32 frame)
  2371. {
  2372. /* This the upper bound for the length of an rle block.
  2373. * This isn't the actual upper bound. Finding the real threshold
  2374. * needs further investigation.
  2375. */
  2376. const U32 rleMaxLength = 25;
  2377. size_t cSize;
  2378. const BYTE* ip = (const BYTE*)src;
  2379. BYTE* op = (BYTE*)dst;
  2380. DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
  2381. (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
  2382. (unsigned)zc->blockState.matchState.nextToUpdate);
  2383. { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
  2384. FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
  2385. if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
  2386. }
  2387. if (zc->seqCollector.collectSequences) {
  2388. ZSTD_copyBlockSequences(zc);
  2389. ZSTD_confirmRepcodesAndEntropyTables(zc);
  2390. return 0;
  2391. }
  2392. /* encode sequences and literals */
  2393. cSize = ZSTD_entropyCompressSequences(&zc->seqStore,
  2394. &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
  2395. &zc->appliedParams,
  2396. dst, dstCapacity,
  2397. srcSize,
  2398. zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
  2399. zc->bmi2);
  2400. if (zc->seqCollector.collectSequences) {
  2401. ZSTD_copyBlockSequences(zc);
  2402. return 0;
  2403. }
  2404. if (frame &&
  2405. /* We don't want to emit our first block as a RLE even if it qualifies because
  2406. * doing so will cause the decoder (cli only) to throw a "should consume all input error."
  2407. * This is only an issue for zstd <= v1.4.3
  2408. */
  2409. !zc->isFirstBlock &&
  2410. cSize < rleMaxLength &&
  2411. ZSTD_isRLE(ip, srcSize))
  2412. {
  2413. cSize = 1;
  2414. op[0] = ip[0];
  2415. }
  2416. out:
  2417. if (!ZSTD_isError(cSize) && cSize > 1) {
  2418. ZSTD_confirmRepcodesAndEntropyTables(zc);
  2419. }
  2420. /* We check that dictionaries have offset codes available for the first
  2421. * block. After the first block, the offcode table might not have large
  2422. * enough codes to represent the offsets in the data.
  2423. */
  2424. if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
  2425. zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
  2426. return cSize;
  2427. }
/* ZSTD_compressBlock_targetCBlockSize_body() :
 * Compress one block under the target-compressed-block-size constraint,
 * splitting it into super-blocks. Falls back to an RLE block or a raw
 * uncompressed block when super-block compression does not pay off.
 * @bss : result of ZSTD_buildSeqStore() for this block (already computed by caller).
 * @return : total bytes written to dst (block headers included), or an error code. */
static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
                                                       void* dst, size_t dstCapacity,
                                                       const void* src, size_t srcSize,
                                                       const size_t bss, U32 lastBlock)
{
    DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
    if (bss == ZSTDbss_compress) {
        if (/* We don't want to emit our first block as a RLE even if it qualifies because
             * doing so will cause the decoder (cli only) to throw a "should consume all input error."
             * This is only an issue for zstd <= v1.4.3
             */
            !zc->isFirstBlock &&
            ZSTD_maybeRLE(&zc->seqStore) &&
            ZSTD_isRLE((BYTE const*)src, srcSize))
        {
            return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
        }
        /* Attempt superblock compression.
         *
         * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
         * standard ZSTD_compressBound(). This is a problem, because even if we have
         * space now, taking an extra byte now could cause us to run out of space later
         * and violate ZSTD_compressBound().
         *
         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
         *
         * In order to respect ZSTD_compressBound() we must attempt to emit a raw
         * uncompressed block in these cases:
         * * cSize == 0: Return code for an uncompressed block.
         * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
         *   ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
         *   output space.
         * * cSize >= blockBound(srcSize): We have expanded the block too much so
         *   emit an uncompressed block.
         */
        {
            size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
            if (cSize != ERROR(dstSize_tooSmall)) {
                /* only accept the compressed output if it beats srcSize by minGain */
                size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
                if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
                    ZSTD_confirmRepcodesAndEntropyTables(zc);
                    return cSize;
                }
            }
        }
    }
    DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
    /* Superblock compression failed, attempt to emit a single no compress block.
     * The decoder will be able to stream this block since it is uncompressed.
     */
    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
}
  2481. static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
  2482. void* dst, size_t dstCapacity,
  2483. const void* src, size_t srcSize,
  2484. U32 lastBlock)
  2485. {
  2486. size_t cSize = 0;
  2487. const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
  2488. DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
  2489. (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
  2490. FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
  2491. cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
  2492. FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
  2493. if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
  2494. zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
  2495. return cSize;
  2496. }
/* ZSTD_overflowCorrectIfNeeded() :
 * If indexing the input up to `iend` would overflow the window's indices,
 * rebase the window and rescale all match-state table entries by `correction`.
 * Dictionaries attached to the match state are invalidated in the process. */
static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
                                         ZSTD_cwksp* ws,
                                         ZSTD_CCtx_params const* params,
                                         void const* ip,
                                         void const* iend)
{
    if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
        U32 const maxDist = (U32)1 << params->cParams.windowLog;
        U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
        /* bounds guaranteeing the (U32)1 << log shifts above cannot overflow */
        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);

        /* the tables are rewritten in place: mark dirty around the rescale so
         * the workspace tracks their state correctly */
        ZSTD_cwksp_mark_tables_dirty(ws);
        ZSTD_reduceIndex(ms, params, correction);
        ZSTD_cwksp_mark_tables_clean(ws);
        /* shift the insertion cursor by the same correction (clamped at 0) */
        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
        else ms->nextToUpdate -= correction;
        /* invalidate dictionaries on overflow correction */
        ms->loadedDictEnd = 0;
        ms->dictMatchState = NULL;
    }
}
/*! ZSTD_compress_frameChunk() :
 *  Compress a chunk of data into one or multiple blocks.
 *  All blocks will be terminated, all input will be consumed.
 *  Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
 *  Frame is supposed already started (header already produced)
 * @return : compressed size, or an error code
 */
static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
                                        void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize,
                                        U32 lastFrameChunk)
{
    size_t blockSize = cctx->blockSize;
    size_t remaining = srcSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;

    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);

    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
    /* frame checksum is updated incrementally over the raw input */
    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
        XXH64_update(&cctx->xxhState, src, srcSize);

    while (remaining) {
        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
        /* last block of the frame only if this is the last chunk AND its final block */
        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);

        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
                        dstSize_tooSmall,
                        "not enough space to store compressed block");
        if (remaining < blockSize) blockSize = remaining;

        /* keep window indices valid before indexing this block */
        ZSTD_overflowCorrectIfNeeded(
            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);

        /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;

        {   size_t cSize;
            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
                /* target-size path writes its own block header(s) */
                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
                assert(cSize > 0);
                assert(cSize <= blockSize + ZSTD_blockHeaderSize);
            } else {
                /* default path : compress past the 3-byte header slot,
                 * then come back and write the header */
                cSize = ZSTD_compressBlock_internal(cctx,
                                        op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                                        ip, blockSize, 1 /* frame */);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");

                if (cSize == 0) {  /* block is not compressible */
                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
                    FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
                } else {
                    /* cSize == 1 is ZSTD_compressBlock_internal's RLE signal;
                     * header layout : bit 0 lastBlock, bits 1-2 block type, bits 3+ size */
                    U32 const cBlockHeader = cSize == 1 ?
                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                    MEM_writeLE24(op, cBlockHeader);
                    cSize += ZSTD_blockHeaderSize;
                }
            }

            ip += blockSize;
            assert(remaining >= blockSize);
            remaining -= blockSize;
            op += cSize;
            assert(dstCapacity >= cSize);
            dstCapacity -= cSize;
            cctx->isFirstBlock = 0;
            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                        (unsigned)cSize);
    }   }

    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
    return (size_t)(op-ostart);
}
/* ZSTD_writeFrameHeader() :
 * Write the frame header into `dst` : optional magic number, frame header
 * descriptor byte, optional window descriptor, dictID (0/1/2/4 bytes) and
 * frame content size (0/1/2/4/8 bytes).
 * @return : number of bytes written, or an error code */
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
{   BYTE* const op = (BYTE*)dst;
    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
    U32   const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
    U32   const checksumFlag = params->fParams.checksumFlag>0;
    U32   const windowSize = (U32)1 << params->cParams.windowLog;
    /* single-segment frames (window covers the whole content) omit the window descriptor */
    U32   const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
    BYTE  const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
    U32   const fcsCode = params->fParams.contentSizeFlag ?
                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
    /* descriptor byte : bits 0-1 dictIDSizeCode, bit 2 checksumFlag,
     * bit 5 singleSegment, bits 6-7 fcsCode */
    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
    size_t pos=0;

    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
                    "dst buf is too small to fit worst-case frame header size.");
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);

    /* magic number is only emitted for the standard zstd format */
    if (params->format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDescriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    /* dictID field, little-endian, size selected by dictIDSizeCode */
    switch(dictIDSizeCode)
    {
        default: assert(0); /* impossible */
        case 0 : break;
        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    }
    /* frame content size field, little-endian, size selected by fcsCode */
    switch(fcsCode)
    {
        default: assert(0); /* impossible */
        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    }
    return pos;
}
  2631. /* ZSTD_writeLastEmptyBlock() :
  2632. * output an empty Block with end-of-frame mark to complete a frame
  2633. * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
  2634. * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
  2635. */
  2636. size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
  2637. {
  2638. RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
  2639. "dst buf is too small to write frame trailer empty block.");
  2640. { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
  2641. MEM_writeLE24(dst, cBlockHeader24);
  2642. return ZSTD_blockHeaderSize;
  2643. }
  2644. }
  2645. size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
  2646. {
  2647. RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
  2648. "wrong cctx stage");
  2649. RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
  2650. parameter_unsupported,
  2651. "incompatible with ldm");
  2652. cctx->externSeqStore.seq = seq;
  2653. cctx->externSeqStore.size = nbSeq;
  2654. cctx->externSeqStore.capacity = nbSeq;
  2655. cctx->externSeqStore.pos = 0;
  2656. cctx->externSeqStore.posInSequence = 0;
  2657. return 0;
  2658. }
/* ZSTD_compressContinue_internal() :
 * Compress `src` either as the next chunk of an ongoing frame (frame=1) or as
 * a single raw block (frame=0). Emits the frame header first if the frame has
 * not started yet.
 * @return : number of bytes written into `dst`, or an error code */
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                                              void* dst, size_t dstCapacity,
                                              const void* src, size_t srcSize,
                                              U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (unsigned)srcSize);
    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
                    "missing init (ZSTD_compressBegin)");

    if (frame && (cctx->stage==ZSTDcs_init)) {
        /* first call on this frame : emit the frame header */
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        assert(fhSize <= dstCapacity);
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    /* extend the match-state window over the new input; when
     * ZSTD_window_update() returns 0, resync nextToUpdate to dictLimit */
    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if (cctx->appliedParams.ldmParams.enableLdm) {
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
    }

    if (!frame) {
        /* overflow check and correction for block mode */
        ZSTD_overflowCorrectIfNeeded(
            ms, &cctx->workspace, &cctx->appliedParams,
            src, (BYTE const*)src + srcSize);
    }

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
    {   size_t const cSize = frame ?
                ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
        FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
        cctx->consumedSrcSize += srcSize;
        cctx->producedCSize += (cSize + fhSize);
        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
            RETURN_ERROR_IF(
                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
                srcSize_wrong,
                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
                (unsigned)cctx->pledgedSrcSizePlusOne-1,
                (unsigned)cctx->consumedSrcSize);
        }
        return cSize + fhSize;
    }
}
  2712. size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
  2713. void* dst, size_t dstCapacity,
  2714. const void* src, size_t srcSize)
  2715. {
  2716. DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
  2717. return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
  2718. }
  2719. size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
  2720. {
  2721. ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
  2722. assert(!ZSTD_checkCParams(cParams));
  2723. return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
  2724. }
  2725. size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
  2726. {
  2727. DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
  2728. { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
  2729. RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
  2730. return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
  2731. }
  2732. /*! ZSTD_loadDictionaryContent() :
  2733. * @return : 0, or an error code
  2734. */
  2735. static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
  2736. ldmState_t* ls,
  2737. ZSTD_cwksp* ws,
  2738. ZSTD_CCtx_params const* params,
  2739. const void* src, size_t srcSize,
  2740. ZSTD_dictTableLoadMethod_e dtlm)
  2741. {
  2742. const BYTE* ip = (const BYTE*) src;
  2743. const BYTE* const iend = ip + srcSize;
  2744. ZSTD_window_update(&ms->window, src, srcSize);
  2745. ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
  2746. if (params->ldmParams.enableLdm && ls != NULL) {
  2747. ZSTD_window_update(&ls->window, src, srcSize);
  2748. ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
  2749. }
  2750. /* Assert that we the ms params match the params we're being given */
  2751. ZSTD_assertEqualCParams(params->cParams, ms->cParams);
  2752. if (srcSize <= HASH_READ_SIZE) return 0;
  2753. while (iend - ip > HASH_READ_SIZE) {
  2754. size_t const remaining = (size_t)(iend - ip);
  2755. size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
  2756. const BYTE* const ichunk = ip + chunk;
  2757. ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
  2758. if (params->ldmParams.enableLdm && ls != NULL)
  2759. ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
  2760. switch(params->cParams.strategy)
  2761. {
  2762. case ZSTD_fast:
  2763. ZSTD_fillHashTable(ms, ichunk, dtlm);
  2764. break;
  2765. case ZSTD_dfast:
  2766. ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
  2767. break;
  2768. case ZSTD_greedy:
  2769. case ZSTD_lazy:
  2770. case ZSTD_lazy2:
  2771. if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
  2772. assert(chunk == remaining); /* must load everything in one go */
  2773. ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
  2774. } else if (chunk >= HASH_READ_SIZE) {
  2775. ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
  2776. }
  2777. break;
  2778. case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
  2779. case ZSTD_btopt:
  2780. case ZSTD_btultra:
  2781. case ZSTD_btultra2:
  2782. if (chunk >= HASH_READ_SIZE)
  2783. ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
  2784. break;
  2785. default:
  2786. assert(0); /* not possible : not a valid strategy id */
  2787. }
  2788. ip = ichunk;
  2789. }
  2790. ms->nextToUpdate = (U32)(iend - ms->window.base);
  2791. return 0;
  2792. }
  2793. /* Dictionaries that assign zero probability to symbols that show up causes problems
  2794. * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check
  2795. * and only dictionaries with 100% valid symbols can be assumed valid.
  2796. */
  2797. static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
  2798. {
  2799. U32 s;
  2800. if (dictMaxSymbolValue < maxSymbolValue) {
  2801. return FSE_repeat_check;
  2802. }
  2803. for (s = 0; s <= maxSymbolValue; ++s) {
  2804. if (normalizedCounter[s] == 0) {
  2805. return FSE_repeat_check;
  2806. }
  2807. }
  2808. return FSE_repeat_valid;
  2809. }
/* ZSTD_loadCEntropy() :
 * Parse the entropy section of a zstd dictionary (Huffman literal table,
 * offcode/matchlength/litlength FSE tables, then 3 repcodes) into `bs`.
 * `dict` points at the full dictionary including magic number and dictID.
 * @return : number of header bytes consumed, or an error code */
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
                         const void* const dict, size_t dictSize)
{
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */
    const BYTE* const dictEnd = dictPtr + dictSize;
    dictPtr += 8;   /* 4-byte magic number + 4-byte dictID */
    bs->entropy.huf.repeatMode = HUF_repeat_check;

    /* Huffman table for literals */
    {   unsigned maxSymbolValue = 255;
        unsigned hasZeroWeights = 1;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
            dictEnd-dictPtr, &hasZeroWeights);

        /* We only set the loaded table as valid if it contains all non-zero
         * weights. Otherwise, we set it to check */
        if (!hasZeroWeights)
            bs->entropy.huf.repeatMode = HUF_repeat_valid;

        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
        dictPtr += hufHeaderSize;
    }

    /* FSE table for offset codes */
    {   unsigned offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
        /* fill all offset symbols to avoid garbage at end of table */
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.offcodeCTable,
                offcodeNCount, MaxOff, offcodeLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
        dictPtr += offcodeHeaderSize;
    }

    /* FSE table for match lengths */
    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.matchlengthCTable,
                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
        dictPtr += matchlengthHeaderSize;
    }

    /* FSE table for literal lengths */
    {   short litlengthNCount[MaxLL+1];
        unsigned litlengthMaxValue = MaxLL, litlengthLog;
        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.litlengthCTable,
                litlengthNCount, litlengthMaxValue, litlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
        dictPtr += litlengthHeaderSize;
    }

    /* 3 x 4-byte little-endian repcodes follow the entropy tables */
    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
    bs->rep[0] = MEM_readLE32(dictPtr+0);
    bs->rep[1] = MEM_readLE32(dictPtr+4);
    bs->rep[2] = MEM_readLE32(dictPtr+8);
    dictPtr += 12;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        U32 offcodeMax = MaxOff;
        if (dictContentSize <= ((U32)-1) - 128 KB) {
            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
        }
        /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
        bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));

        /* All repCodes must be <= dictContentSize and != 0 */
        {   U32 u;
            for (u=0; u<3; u++) {
                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
    }   }   }

    return dictPtr - (const BYTE*)dict;
}
/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * @return : dictID, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed >= 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_cwksp* ws,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    size_t dictID;
    size_t eSize;

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize >= 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    /* dictID sits right after the 4-byte magic number */
    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr + 4 /* skip magic number */ );
    /* load entropy tables; eSize covers magic + dictID + entropy section + repcodes */
    eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
    dictPtr += eSize;

    /* index the remaining raw content into the match state */
    {
        size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
    }
    return dictID;
}
  2926. /** ZSTD_compress_insertDictionary() :
  2927. * @return : dictID, or an error code */
  2928. static size_t
  2929. ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
  2930. ZSTD_matchState_t* ms,
  2931. ldmState_t* ls,
  2932. ZSTD_cwksp* ws,
  2933. const ZSTD_CCtx_params* params,
  2934. const void* dict, size_t dictSize,
  2935. ZSTD_dictContentType_e dictContentType,
  2936. ZSTD_dictTableLoadMethod_e dtlm,
  2937. void* workspace)
  2938. {
  2939. DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
  2940. if ((dict==NULL) || (dictSize<8)) {
  2941. RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
  2942. return 0;
  2943. }
  2944. ZSTD_reset_compressedBlockState(bs);
  2945. /* dict restricted modes */
  2946. if (dictContentType == ZSTD_dct_rawContent)
  2947. return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
  2948. if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
  2949. if (dictContentType == ZSTD_dct_auto) {
  2950. DEBUGLOG(4, "raw content dictionary detected");
  2951. return ZSTD_loadDictionaryContent(
  2952. ms, ls, ws, params, dict, dictSize, dtlm);
  2953. }
  2954. RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
  2955. assert(0); /* impossible */
  2956. }
  2957. /* dict as full zstd dictionary */
  2958. return ZSTD_loadZstdDictionary(
  2959. bs, ms, ws, params, dict, dictSize, dtlm, workspace);
  2960. }
/* Thresholds used by ZSTD_compressBegin_internal() to decide whether the
 * cdict's parameters should be preferred : when the pledged source size is
 * below this cutoff, or below dictContentSize * multiplier. */
#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
/*! ZSTD_compressBegin_internal() :
 *  Start a new frame : reset the context with validated params, then load an
 *  optional dictionary (raw `dict` buffer OR digested `cdict` — never both).
 * @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
                                    ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
    /* Fast path : reuse the cdict's digested state when the source is small
     * relative to the dictionary (or its size unknown), unless the caller
     * forces a full dictionary reload (ZSTD_dictForceLoad). */
    if ( (cdict)
      && (cdict->dictContentSize > 0)
      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
        || cdict->compressionLevel == 0)
      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
    }

    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
                                     ZSTDcrp_makeClean, zbuff) , "");
    /* Slow path : insert the dictionary content (from cdict or raw buffer). */
    {   size_t const dictID = cdict ?
                ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
                        cdict->dictContentSize, cdict->dictContentType, dtlm,
                        cctx->entropyWorkspace)
              : ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
                        dictContentType, dtlm, cctx->entropyWorkspace);
        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
        assert(dictID <= UINT_MAX);
        cctx->dictID = (U32)dictID;
    }
    return 0;
}
  3004. size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
  3005. const void* dict, size_t dictSize,
  3006. ZSTD_dictContentType_e dictContentType,
  3007. ZSTD_dictTableLoadMethod_e dtlm,
  3008. const ZSTD_CDict* cdict,
  3009. const ZSTD_CCtx_params* params,
  3010. unsigned long long pledgedSrcSize)
  3011. {
  3012. DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
  3013. /* compression parameters verification and optimization */
  3014. FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
  3015. return ZSTD_compressBegin_internal(cctx,
  3016. dict, dictSize, dictContentType, dtlm,
  3017. cdict,
  3018. params, pledgedSrcSize,
  3019. ZSTDb_not_buffered);
  3020. }
  3021. /*! ZSTD_compressBegin_advanced() :
  3022. * @return : 0, or an error code */
  3023. size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
  3024. const void* dict, size_t dictSize,
  3025. ZSTD_parameters params, unsigned long long pledgedSrcSize)
  3026. {
  3027. ZSTD_CCtx_params const cctxParams =
  3028. ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
  3029. return ZSTD_compressBegin_advanced_internal(cctx,
  3030. dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
  3031. NULL /*cdict*/,
  3032. &cctxParams, pledgedSrcSize);
  3033. }
  3034. size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
  3035. {
  3036. ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
  3037. ZSTD_CCtx_params const cctxParams =
  3038. ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
  3039. DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
  3040. return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
  3041. &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
  3042. }
  3043. size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
  3044. {
  3045. return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
  3046. }
/*! ZSTD_writeEpilogue() :
 * Ends a frame : emits the frame header if nothing was written yet (empty frame),
 * a final empty "last" block if one hasn't been emitted, and the xxh64 checksum
 * when checksumFlag is set. Resets the context stage to ZSTDcs_created.
 * @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    size_t fhSize = 0;
    DEBUGLOG(4, "ZSTD_writeEpilogue");
    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
    /* special case : empty frame — the header was never written, write it now */
    if (cctx->stage == ZSTDcs_init) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }
    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        /* capacity check is 4 (not ZSTD_blockHeaderSize) because MEM_writeLE32
         * stores 4 bytes even though only the low 3 belong to the header */
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }
    if (cctx->appliedParams.fParams.checksumFlag) {
        /* low 32 bits of the running xxh64 of all consumed input */
        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }
    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
    return op-ostart;
}
/*! ZSTD_compressEnd() :
 * Compresses the last chunk of input, then writes the frame epilogue
 * (last-block marker + optional checksum).
 * If a src size was pledged, verifies that exactly that much was consumed.
 * @return : total nb of bytes written into dst (or an error code) */
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize)
{
    size_t endResult;
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
    /* append epilogue right after the compressed data */
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
        /* pledgedSrcSizePlusOne stores pledged+1 so that 0 can mean "none";
         * ZSTD_CONTENTSIZE_UNKNOWN (-1) wraps to 0 under this encoding */
        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
        DEBUGLOG(4, "end of frame : controlling src size");
        RETURN_ERROR_IF(
            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
            srcSize_wrong,
            "error : pledgedSrcSize = %u, while realSrcSize = %u",
            (unsigned)cctx->pledgedSrcSizePlusOne-1,
            (unsigned)cctx->consumedSrcSize);
    }
    return cSize + endResult;
}
  3107. static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
  3108. void* dst, size_t dstCapacity,
  3109. const void* src, size_t srcSize,
  3110. const void* dict,size_t dictSize,
  3111. const ZSTD_parameters* params)
  3112. {
  3113. ZSTD_CCtx_params const cctxParams =
  3114. ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
  3115. DEBUGLOG(4, "ZSTD_compress_internal");
  3116. return ZSTD_compress_advanced_internal(cctx,
  3117. dst, dstCapacity,
  3118. src, srcSize,
  3119. dict, dictSize,
  3120. &cctxParams);
  3121. }
  3122. size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
  3123. void* dst, size_t dstCapacity,
  3124. const void* src, size_t srcSize,
  3125. const void* dict,size_t dictSize,
  3126. ZSTD_parameters params)
  3127. {
  3128. DEBUGLOG(4, "ZSTD_compress_advanced");
  3129. FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
  3130. return ZSTD_compress_internal(cctx,
  3131. dst, dstCapacity,
  3132. src, srcSize,
  3133. dict, dictSize,
  3134. &params);
  3135. }
  3136. /* Internal */
  3137. size_t ZSTD_compress_advanced_internal(
  3138. ZSTD_CCtx* cctx,
  3139. void* dst, size_t dstCapacity,
  3140. const void* src, size_t srcSize,
  3141. const void* dict,size_t dictSize,
  3142. const ZSTD_CCtx_params* params)
  3143. {
  3144. DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
  3145. FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
  3146. dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
  3147. params, srcSize, ZSTDb_not_buffered) , "");
  3148. return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
  3149. }
  3150. size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
  3151. void* dst, size_t dstCapacity,
  3152. const void* src, size_t srcSize,
  3153. const void* dict, size_t dictSize,
  3154. int compressionLevel)
  3155. {
  3156. ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
  3157. ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
  3158. DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
  3159. assert(params.fParams.contentSizeFlag == 1);
  3160. return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
  3161. }
  3162. size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
  3163. void* dst, size_t dstCapacity,
  3164. const void* src, size_t srcSize,
  3165. int compressionLevel)
  3166. {
  3167. DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
  3168. assert(cctx != NULL);
  3169. return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
  3170. }
/*! ZSTD_compress() :
 * One-shot compression with a transient compression context.
 * The context lives on the heap or on the stack depending on
 * ZSTD_COMPRESS_HEAPMODE (a CCtx is large; some platforms cannot
 * afford it on the stack).
 * @return : compressed size written into dst, or an error code */
size_t ZSTD_compress(void* dst, size_t dstCapacity,
               const void* src, size_t srcSize,
                     int compressionLevel)
{
    size_t result;
#if ZSTD_COMPRESS_HEAPMODE
    ZSTD_CCtx* cctx = ZSTD_createCCtx();
    RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
    result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
    ZSTD_freeCCtx(cctx);
#else
    /* context object on the stack; its internal buffers are still heap-allocated */
    ZSTD_CCtx ctxBody;
    ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
    result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
    ZSTD_freeCCtxContent(&ctxBody);   /* can't free ctxBody itself, as it's on stack; free only heap content */
#endif
    return result;
}
  3189. /* ===== Dictionary API ===== */
  3190. /*! ZSTD_estimateCDictSize_advanced() :
  3191. * Estimate amount of memory that will be needed to create a dictionary with following arguments */
  3192. size_t ZSTD_estimateCDictSize_advanced(
  3193. size_t dictSize, ZSTD_compressionParameters cParams,
  3194. ZSTD_dictLoadMethod_e dictLoadMethod)
  3195. {
  3196. DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
  3197. return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
  3198. + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
  3199. + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
  3200. + (dictLoadMethod == ZSTD_dlm_byRef ? 0
  3201. : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
  3202. }
  3203. size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
  3204. {
  3205. ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3206. return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
  3207. }
/* ZSTD_sizeof_CDict() :
 * @return : total memory footprint of the CDict, including its workspace. */
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0; /* support sizeof on NULL */
    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
    /* cdict may be emplaced inside its own workspace; in that case its bytes
     * are already counted by ZSTD_cwksp_sizeof(), so don't count them twice */
    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
           + ZSTD_cwksp_sizeof(&cdict->workspace);
}
/* ZSTD_initCDict_internal() :
 * Fills an already-allocated CDict : records or copies the dictionary
 * content, reserves the entropy workspace, resets the match state,
 * then digests the dictionary.
 * @return : 0 on success, or an error code.
 * Note : reservation order (dict copy first, then entropy workspace)
 * must match the sizing done at allocation time. */
static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_CCtx_params params)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
    assert(!ZSTD_checkCParams(params.cParams));
    cdict->matchState.cParams = params.cParams;
    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
    /* dedicated dict search is disabled for oversized dictionaries */
    if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
        cdict->matchState.dedicatedDictSearch = 0;
    }
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        /* reference the caller's buffer directly; caller must keep it alive */
        cdict->dictContent = dictBuffer;
    } else {
        /* copy the dictionary into the CDict's own workspace */
        void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
        cdict->dictContent = internalBuffer;
        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;
    cdict->dictContentType = dictContentType;
    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    FORWARD_IF_ERROR(ZSTD_reset_matchState(
        &cdict->matchState,
        &cdict->workspace,
        &params.cParams,
        ZSTDcrp_makeClean,
        ZSTDirp_reset,
        ZSTD_resetTarget_CDict), "");
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is < 8 bytes.
     */
    {   params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
                    &params, cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }
    return 0;
}
  3266. static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
  3267. ZSTD_dictLoadMethod_e dictLoadMethod,
  3268. ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
  3269. {
  3270. if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
  3271. { size_t const workspaceSize =
  3272. ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
  3273. ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
  3274. ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
  3275. (dictLoadMethod == ZSTD_dlm_byRef ? 0
  3276. : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
  3277. void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
  3278. ZSTD_cwksp ws;
  3279. ZSTD_CDict* cdict;
  3280. if (!workspace) {
  3281. ZSTD_customFree(workspace, customMem);
  3282. return NULL;
  3283. }
  3284. ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
  3285. cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
  3286. assert(cdict != NULL);
  3287. ZSTD_cwksp_move(&cdict->workspace, &ws);
  3288. cdict->customMem = customMem;
  3289. cdict->compressionLevel = 0; /* signals advanced API usage */
  3290. return cdict;
  3291. }
  3292. }
/* ZSTD_createCDict_advanced() :
 * legacy entry point taking explicit cParams;
 * wraps them into CCtx_params and forwards to ZSTD_createCDict_advanced2(). */
ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_compressionParameters cParams,
                                      ZSTD_customMem customMem)
{
    ZSTD_CCtx_params cctxParams;
    ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
    ZSTD_CCtxParams_init(&cctxParams, 0);
    /* explicit cParams take precedence over the (default) compression level */
    cctxParams.cParams = cParams;
    cctxParams.customMem = customMem;
    return ZSTD_createCDict_advanced2(
        dictBuffer, dictSize,
        dictLoadMethod, dictContentType,
        &cctxParams, customMem);
}
  3309. ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
  3310. const void* dict, size_t dictSize,
  3311. ZSTD_dictLoadMethod_e dictLoadMethod,
  3312. ZSTD_dictContentType_e dictContentType,
  3313. const ZSTD_CCtx_params* originalCctxParams,
  3314. ZSTD_customMem customMem)
  3315. {
  3316. ZSTD_CCtx_params cctxParams = *originalCctxParams;
  3317. ZSTD_compressionParameters cParams;
  3318. ZSTD_CDict* cdict;
  3319. DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
  3320. if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
  3321. if (cctxParams.enableDedicatedDictSearch) {
  3322. cParams = ZSTD_dedicatedDictSearch_getCParams(
  3323. cctxParams.compressionLevel, dictSize);
  3324. ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
  3325. } else {
  3326. cParams = ZSTD_getCParamsFromCCtxParams(
  3327. &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3328. }
  3329. if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
  3330. /* Fall back to non-DDSS params */
  3331. cctxParams.enableDedicatedDictSearch = 0;
  3332. cParams = ZSTD_getCParamsFromCCtxParams(
  3333. &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3334. }
  3335. cctxParams.cParams = cParams;
  3336. cdict = ZSTD_createCDict_advanced_internal(dictSize,
  3337. dictLoadMethod, cctxParams.cParams,
  3338. customMem);
  3339. if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
  3340. dict, dictSize,
  3341. dictLoadMethod, dictContentType,
  3342. cctxParams) )) {
  3343. ZSTD_freeCDict(cdict);
  3344. return NULL;
  3345. }
  3346. return cdict;
  3347. }
  3348. ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
  3349. {
  3350. ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3351. ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
  3352. ZSTD_dlm_byCopy, ZSTD_dct_auto,
  3353. cParams, ZSTD_defaultCMem);
  3354. if (cdict)
  3355. cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
  3356. return cdict;
  3357. }
  3358. ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
  3359. {
  3360. ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3361. ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
  3362. ZSTD_dlm_byRef, ZSTD_dct_auto,
  3363. cParams, ZSTD_defaultCMem);
  3364. if (cdict)
  3365. cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
  3366. return cdict;
  3367. }
  3368. size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
  3369. {
  3370. if (cdict==NULL) return 0; /* support free on NULL */
  3371. { ZSTD_customMem const cMem = cdict->customMem;
  3372. int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
  3373. ZSTD_cwksp_free(&cdict->workspace, cMem);
  3374. if (!cdictInWorkspace) {
  3375. ZSTD_customFree(cdict, cMem);
  3376. }
  3377. return 0;
  3378. }
  3379. }
/*! ZSTD_initStaticCDict() :
 * Generate a digested dictionary in provided memory area.
 * workspace: The memory area to emplace the dictionary into.
 *            Provided pointer must 8-bytes aligned.
 *            It must outlive dictionary usage.
 * workspaceSize: Use ZSTD_estimateCDictSize()
 *                to determine how large workspace must be.
 * cParams : use ZSTD_getCParams() to transform a compression level
 *           into its relevants cParams.
 * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
 * Note : there is no corresponding "free" function.
 *        Since workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
                                 void* workspace, size_t workspaceSize,
                           const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType,
                                 ZSTD_compressionParameters cParams)
{
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
    /* must mirror the sizing in ZSTD_estimateCDictSize_advanced() */
    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0
                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
                            + matchStateSize;
    ZSTD_CDict* cdict;
    ZSTD_CCtx_params params;
    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
    {
        /* emplace the CDict object at the start of the caller's workspace */
        ZSTD_cwksp ws;
        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
        if (cdict == NULL) return NULL;
        ZSTD_cwksp_move(&cdict->workspace, &ws);
    }
    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;
    ZSTD_CCtxParams_init(&params, 0);
    params.cParams = cParams;
    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                              dict, dictSize,
                                              dictLoadMethod, dictContentType,
                                              params) ))
        return NULL;
    return cdict;
}
  3428. ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
  3429. {
  3430. assert(cdict != NULL);
  3431. return cdict->matchState.cParams;
  3432. }
  3433. /*! ZSTD_getDictID_fromCDict() :
  3434. * Provides the dictID of the dictionary loaded into `cdict`.
  3435. * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
  3436. * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
  3437. unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
  3438. {
  3439. if (cdict==NULL) return 0;
  3440. return cdict->dictID;
  3441. }
/* ZSTD_compressBegin_usingCDict_advanced() :
 * cdict must be != NULL.
 * Chooses between the CDict's own cParams and level-derived cParams,
 * then starts a frame referencing the CDict. */
size_t ZSTD_compressBegin_usingCDict_advanced(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
    {   ZSTD_CCtx_params params = cctx->requestedParams;
        /* Use the CDict's cParams when the source is small relative to the
         * dictionary, unknown, or the CDict came from the advanced API
         * (compressionLevel == 0) — unless the user forces a full reload.
         * Otherwise re-derive cParams from the CDict's level and the
         * pledged source size. */
        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
                        || cdict->compressionLevel == 0 )
                      && (params.attachDictPref != ZSTD_dictForceLoad) ?
                ZSTD_getCParamsFromCDict(cdict)
              : ZSTD_getCParams(cdict->compressionLevel,
                                pledgedSrcSize,
                                cdict->dictContentSize);
        /* Increase window log to fit the entire dictionary and source if the
         * source size is known. Limit the increase to 19, which is the
         * window log for compression level 1 with the largest source size.
         */
        if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
            U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
            U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
            params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
        }
        params.fParams = fParams;
        return ZSTD_compressBegin_internal(cctx,
                                           NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                           cdict,
                                           &params, pledgedSrcSize,
                                           ZSTDb_not_buffered);
    }
}
  3477. /* ZSTD_compressBegin_usingCDict() :
  3478. * pledgedSrcSize=0 means "unknown"
  3479. * if pledgedSrcSize>0, it will enable contentSizeFlag */
  3480. size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
  3481. {
  3482. ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
  3483. DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
  3484. return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
  3485. }
  3486. size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
  3487. void* dst, size_t dstCapacity,
  3488. const void* src, size_t srcSize,
  3489. const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
  3490. {
  3491. FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
  3492. return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
  3493. }
  3494. /*! ZSTD_compress_usingCDict() :
  3495. * Compression using a digested Dictionary.
  3496. * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
  3497. * Note that compression parameters are decided at CDict creation time
  3498. * while frame parameters are hardcoded */
  3499. size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
  3500. void* dst, size_t dstCapacity,
  3501. const void* src, size_t srcSize,
  3502. const ZSTD_CDict* cdict)
  3503. {
  3504. ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
  3505. return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
  3506. }
  3507. /* ******************************************************************
  3508. * Streaming
  3509. ********************************************************************/
  3510. ZSTD_CStream* ZSTD_createCStream(void)
  3511. {
  3512. DEBUGLOG(3, "ZSTD_createCStream");
  3513. return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
  3514. }
  3515. ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
  3516. {
  3517. return ZSTD_initStaticCCtx(workspace, workspaceSize);
  3518. }
  3519. ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
  3520. { /* CStream and CCtx are now same object */
  3521. return ZSTD_createCCtx_advanced(customMem);
  3522. }
  3523. size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
  3524. {
  3525. return ZSTD_freeCCtx(zcs); /* same object */
  3526. }
  3527. /*====== Initialization ======*/
  3528. size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
  3529. size_t ZSTD_CStreamOutSize(void)
  3530. {
  3531. return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
  3532. }
  3533. static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
  3534. {
  3535. if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
  3536. return ZSTD_cpm_attachDict;
  3537. else
  3538. return ZSTD_cpm_noAttachDict;
  3539. }
  3540. /* ZSTD_resetCStream():
  3541. * pledgedSrcSize == 0 means "unknown" */
  3542. size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
  3543. {
  3544. /* temporary : 0 interpreted as "unknown" during transition period.
  3545. * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
  3546. * 0 will be interpreted as "empty" in the future.
  3547. */
  3548. U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
  3549. DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
  3550. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3551. FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
  3552. return 0;
  3553. }
/*! ZSTD_initCStream_internal() :
 * Note : for lib/compress only. Used by zstdmt_compress.c.
 * Assumption 1 : params are valid
 * Assumption 2 : either dict, or cdict, is defined, not both
 * Resets the session, installs the pledged size and parameters,
 * then loads the dictionary (raw dict or digested cdict).
 * @return : 0 on success, or an error code */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
                    const ZSTD_CCtx_params* params,
                    unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_internal");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    zcs->requestedParams = *params;
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
    if (dict) {
        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    } else {
        /* Dictionary is cleared if !cdict */
        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    }
    return 0;
}
  3577. /* ZSTD_initCStream_usingCDict_advanced() :
  3578. * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
  3579. size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
  3580. const ZSTD_CDict* cdict,
  3581. ZSTD_frameParameters fParams,
  3582. unsigned long long pledgedSrcSize)
  3583. {
  3584. DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
  3585. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3586. FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
  3587. zcs->requestedParams.fParams = fParams;
  3588. FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
  3589. return 0;
  3590. }
  3591. /* note : cdict must outlive compression session */
  3592. size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
  3593. {
  3594. DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
  3595. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3596. FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
  3597. return 0;
  3598. }
/* ZSTD_initCStream_advanced() :
 * pledgedSrcSize must be exact.
 * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
 * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
                                 const void* dict, size_t dictSize,
                                 ZSTD_parameters params, unsigned long long pss)
{
    /* for compatibility with older programs relying on this behavior.
     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
     * This line will be removed in the future.
     * Note : 0 is only remapped to "unknown" when contentSizeFlag is off,
     * so callers who explicitly pledged an empty source keep that meaning. */
    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
    DEBUGLOG(4, "ZSTD_initCStream_advanced");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
    zcs->requestedParams = ZSTD_assignParamsToCCtxParams(&zcs->requestedParams, &params);
    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    return 0;
}
  3620. size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
  3621. {
  3622. DEBUGLOG(4, "ZSTD_initCStream_usingDict");
  3623. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3624. FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
  3625. FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
  3626. return 0;
  3627. }
  3628. size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
  3629. {
  3630. /* temporary : 0 interpreted as "unknown" during transition period.
  3631. * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
  3632. * 0 will be interpreted as "empty" in the future.
  3633. */
  3634. U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
  3635. DEBUGLOG(4, "ZSTD_initCStream_srcSize");
  3636. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3637. FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
  3638. FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
  3639. FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
  3640. return 0;
  3641. }
  3642. size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
  3643. {
  3644. DEBUGLOG(4, "ZSTD_initCStream");
  3645. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3646. FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
  3647. FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
  3648. return 0;
  3649. }
  3650. /*====== Compression ======*/
  3651. static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
  3652. {
  3653. size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
  3654. if (hintInSize==0) hintInSize = cctx->blockSize;
  3655. return hintInSize;
  3656. }
/** ZSTD_compressStream_generic():
 *  internal function for all *compressStream*() variants
 *  non-static, because can be called from zstdmt_compress.c
 *  (NOTE(review): it is declared `static` here — confirm against the header)
 *  Pumps data from *input through the context into *output, honoring
 *  @flushMode; updates input->pos and output->pos on exit.
 * @return : hint size for next input (0 once the frame is complete) */
static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                                          ZSTD_outBuffer* output,
                                          ZSTD_inBuffer* input,
                                          ZSTD_EndDirective const flushMode)
{
    /* Cursors over the caller's buffers.
     * The `!= 0` guards presumably avoid arithmetic on a NULL base pointer
     * when size/pos are 0 — TODO confirm intent. */
    const char* const istart = (const char*)input->src;
    const char* const iend = input->size != 0 ? istart + input->size : istart;
    const char* ip = input->pos != 0 ? istart + input->pos : istart;
    char* const ostart = (char*)output->dst;
    char* const oend = output->size != 0 ? ostart + output->size : ostart;
    char* op = output->pos != 0 ? ostart + output->pos : ostart;
    U32 someMoreWork = 1;

    /* check expectations */
    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
    if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
        assert(zcs->inBuff != NULL);
        assert(zcs->inBuffSize > 0);
    }
    if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
        assert(zcs->outBuff != NULL);
        assert(zcs->outBuffSize > 0);
    }
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);
    assert((U32)flushMode <= (U32)ZSTD_e_end);

    /* State machine: zcss_load (accumulate + compress) <-> zcss_flush (drain outBuff). */
    while (someMoreWork) {
        switch(zcs->streamStage)
        {
        case zcss_init:
            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");

        case zcss_load:
            if ( (flushMode == ZSTD_e_end)
              && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip)     /* Enough output space */
                || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)  /* OR we are allowed to return dstSizeTooSmall */
              && (zcs->inBuffPos == 0) ) {
                /* shortcut to compression pass directly into output buffer */
                size_t const cSize = ZSTD_compressEnd(zcs,
                                                op, oend-op, ip, iend-ip);
                DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
                ip = iend;
                op += cSize;
                zcs->frameEnded = 1;
                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                someMoreWork = 0; break;
            }
            /* complete loading into inBuffer in buffered mode */
            if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
                size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
                size_t const loaded = ZSTD_limitCopy(
                                        zcs->inBuff + zcs->inBuffPos, toLoad,
                                        ip, iend-ip);
                zcs->inBuffPos += loaded;
                if (loaded != 0)
                    ip += loaded;
                if ( (flushMode == ZSTD_e_continue)
                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
                    /* not enough input to fill full block : stop here */
                    someMoreWork = 0; break;
                }
                if ( (flushMode == ZSTD_e_flush)
                  && (zcs->inBuffPos == zcs->inToCompress) ) {
                    /* empty : nothing new to compress since last call */
                    someMoreWork = 0; break;
                }
            }
            /* compress current block (note : this stage cannot be stopped in the middle) */
            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
            {   int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
                void* cDst;
                size_t cSize;
                size_t oSize = oend-op;
                /* buffered: compress what accumulated in inBuff;
                 * stable input: compress straight from caller, one block at a time */
                size_t const iSize = inputBuffered
                    ? zcs->inBuffPos - zcs->inToCompress
                    : MIN((size_t)(iend - ip), zcs->blockSize);
                if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
                    cDst = op;   /* compress into output buffer, to skip flush stage */
                else
                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
                if (inputBuffered) {
                    unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
                    cSize = lastBlock ?
                            ZSTD_compressEnd(zcs, cDst, oSize,
                                        zcs->inBuff + zcs->inToCompress, iSize) :
                            ZSTD_compressContinue(zcs, cDst, oSize,
                                        zcs->inBuff + zcs->inToCompress, iSize);
                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
                    zcs->frameEnded = lastBlock;
                    /* prepare next block */
                    zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
                    if (zcs->inBuffTarget > zcs->inBuffSize)
                        zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;   /* wrap around the input buffer */
                    DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
                             (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
                    if (!lastBlock)
                        assert(zcs->inBuffTarget <= zcs->inBuffSize);
                    zcs->inToCompress = zcs->inBuffPos;
                } else {
                    unsigned const lastBlock = (ip + iSize == iend);
                    assert(flushMode == ZSTD_e_end /* Already validated */);
                    cSize = lastBlock ?
                            ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
                            ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
                    /* Consume the input prior to error checking to mirror buffered mode. */
                    if (iSize > 0)
                        ip += iSize;
                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
                    zcs->frameEnded = lastBlock;
                    if (lastBlock)
                        assert(ip == iend);
                }
                if (cDst == op) {  /* compressed directly into dst : no need to flush */
                    op += cSize;
                    if (zcs->frameEnded) {
                        DEBUGLOG(5, "Frame completed directly in outBuffer");
                        someMoreWork = 0;
                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                    }
                    break;
                }
                zcs->outBuffContentSize = cSize;
                zcs->outBuffFlushedSize = 0;
                zcs->streamStage = zcss_flush;   /* pass-through to flush stage */
            }
            /* fall-through */
        case zcss_flush:
            DEBUGLOG(5, "flush stage");
            assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
                            (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
                if (flushed)
                    op += flushed;
                zcs->outBuffFlushedSize += flushed;
                if (toFlush!=flushed) {
                    /* flush not fully completed, presumably because dst is too small */
                    assert(op==oend);
                    someMoreWork = 0;
                    break;
                }
                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
                if (zcs->frameEnded) {
                    DEBUGLOG(5, "Frame completed on flush");
                    someMoreWork = 0;
                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                    break;
                }
                zcs->streamStage = zcss_load;
                break;
            }

        default: /* impossible */
            assert(0);
        }
    }

    /* Report consumed / produced amounts back to the caller. */
    input->pos = ip - istart;
    output->pos = op - ostart;
    if (zcs->frameEnded) return 0;
    return ZSTD_nextInputSizeHint(zcs);
}
/* Next-input-size hint, dispatched to the multi-threaded context when
 * workers are active (ZSTD_MULTITHREAD builds only), otherwise to the
 * single-threaded implementation. */
static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers >= 1) {
        assert(cctx->mtctx != NULL);   /* mtctx is created whenever nbWorkers >= 1 */
        return ZSTDMT_nextInputSizeHint(cctx->mtctx);
    }
#endif
    return ZSTD_nextInputSizeHint(cctx);
}
  3832. size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
  3833. {
  3834. FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
  3835. return ZSTD_nextInputSizeHint_MTorST(zcs);
  3836. }
  3837. /* After a compression call set the expected input/output buffer.
  3838. * This is validated at the start of the next compression call.
  3839. */
  3840. static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
  3841. {
  3842. if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
  3843. cctx->expectedInBuffer = *input;
  3844. }
  3845. if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
  3846. cctx->expectedOutBufferSize = output->size - output->pos;
  3847. }
  3848. }
  3849. /* Validate that the input/output buffers match the expectations set by
  3850. * ZSTD_setBufferExpectations.
  3851. */
  3852. static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
  3853. ZSTD_outBuffer const* output,
  3854. ZSTD_inBuffer const* input,
  3855. ZSTD_EndDirective endOp)
  3856. {
  3857. if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
  3858. ZSTD_inBuffer const expect = cctx->expectedInBuffer;
  3859. if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
  3860. RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
  3861. if (endOp != ZSTD_e_end)
  3862. RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
  3863. }
  3864. if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
  3865. size_t const outBufferSize = output->size - output->pos;
  3866. if (cctx->expectedOutBufferSize != outBufferSize)
  3867. RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
  3868. }
  3869. return 0;
  3870. }
/* ZSTD_CCtx_init_compressStream2() :
 * Transparent initialization stage for compressStream2().
 * Resolves final parameters from requestedParams + dictionary state,
 * then initializes either the multi-threaded context (nbWorkers > 0,
 * ZSTD_MULTITHREAD builds) or the single-threaded streaming state.
 * @inSize : size of the first input chunk, used to auto-set
 *           pledgedSrcSize when endOp == ZSTD_e_end.
 * @return : 0 on success, or an error code. */
static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
                                             ZSTD_EndDirective endOp,
                                             size_t inSize) {
    ZSTD_CCtx_params params = cctx->requestedParams;
    ZSTD_prefixDict const prefixDict = cctx->prefixDict;
    FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
    assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
    if (cctx->cdict)
        params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */
    DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-fix pledgedSrcSize */
    {
        /* Resolve final cParams from the pledged size and dictionary size. */
        size_t const dictSize = prefixDict.dict
                ? prefixDict.dictSize
                : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &params, cctx->pledgedSrcSizePlusOne-1,
                dictSize, mode);
    }

    if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
        /* Enable LDM by default for optimal parser and window size >= 128MB */
        DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
        params.ldmParams.enableLdm = 1;
    }

#ifdef ZSTD_MULTITHREAD
    /* NOTE: when pledgedSrcSizePlusOne == 0 (size unknown), the -1 wraps to a
     * huge value, so multi-threading is intentionally left enabled here. */
    if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
        params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
    }
    if (params.nbWorkers > 0) {
        /* mt context creation (lazy : reused across sessions once created) */
        if (cctx->mtctx == NULL) {
            DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
                        params.nbWorkers);
            cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem, cctx->pool);
            RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!");
        }
        /* mt compression */
        DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
        FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
                    cctx->mtctx,
                    prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                    cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , "");
        cctx->streamStage = zcss_load;
        cctx->appliedParams = params;
    } else
#endif
    {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
        assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
        FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
                cctx->cdict,
                &params, pledgedSrcSize,
                ZSTDb_buffered) , "");
        assert(cctx->appliedParams.nbWorkers == 0);
        cctx->inToCompress = 0;
        cctx->inBuffPos = 0;
        if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
            /* for small input: avoid automatic flush on reaching end of block, since
             * it would require to add a 3-bytes null block to end frame
             */
            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
        } else {
            cctx->inBuffTarget = 0;
        }
        cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
        cctx->streamStage = zcss_load;
        cctx->frameEnded = 0;
    }
    return 0;
}
/* ZSTD_compressStream2() :
 * Main public streaming entry point. Validates buffers, performs
 * transparent initialization on the first call of a session, then
 * delegates to the multi-threaded or single-threaded implementation.
 * @return : remaining bytes to flush (0 when done), or an error code. */
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
                             ZSTD_outBuffer* output,
                             ZSTD_inBuffer* input,
                             ZSTD_EndDirective endOp)
{
    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
    /* check conditions */
    RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
    RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer");
    RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
    assert(cctx != NULL);

    /* transparent initialization stage */
    if (cctx->streamStage == zcss_init) {
        FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
        ZSTD_setBufferExpectations(cctx, output, input);    /* Set initial buffer expectations now that we've initialized */
    }
    /* end of transparent initialization stage */

    /* stable-buffer modes : reject moved/resized buffers */
    FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
    /* compression stage */
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        size_t flushMin;
        if (cctx->cParamsChanged) {
            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
            cctx->cParamsChanged = 0;
        }
        for (;;) {
            size_t const ipos = input->pos;
            size_t const opos = output->pos;
            flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
            if ( ZSTD_isError(flushMin)
              || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
                ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
            }
            FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed");

            if (endOp == ZSTD_e_continue) {
                /* We only require some progress with ZSTD_e_continue, not maximal progress.
                 * We're done if we've consumed or produced any bytes, or either buffer is
                 * full.
                 */
                if (input->pos != ipos || output->pos != opos || input->pos == input->size || output->pos == output->size)
                    break;
            } else {
                assert(endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
                /* We require maximal progress. We're done when the flush is complete or the
                 * output buffer is full.
                 */
                if (flushMin == 0 || output->pos == output->size)
                    break;
            }
        }
        DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
        /* Either we don't require maximum forward progress, we've finished the
         * flush, or we are out of output space.
         */
        assert(endOp == ZSTD_e_continue || flushMin == 0 || output->pos == output->size);
        ZSTD_setBufferExpectations(cctx, output, input);
        return flushMin;
    }
#endif
    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
    DEBUGLOG(5, "completed ZSTD_compressStream2");
    ZSTD_setBufferExpectations(cctx, output, input);
    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
  4008. size_t ZSTD_compressStream2_simpleArgs (
  4009. ZSTD_CCtx* cctx,
  4010. void* dst, size_t dstCapacity, size_t* dstPos,
  4011. const void* src, size_t srcSize, size_t* srcPos,
  4012. ZSTD_EndDirective endOp)
  4013. {
  4014. ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
  4015. ZSTD_inBuffer input = { src, srcSize, *srcPos };
  4016. /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
  4017. size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
  4018. *dstPos = output.pos;
  4019. *srcPos = input.pos;
  4020. return cErr;
  4021. }
  4022. size_t ZSTD_compress2(ZSTD_CCtx* cctx,
  4023. void* dst, size_t dstCapacity,
  4024. const void* src, size_t srcSize)
  4025. {
  4026. ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
  4027. ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
  4028. DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
  4029. ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
  4030. /* Enable stable input/output buffers. */
  4031. cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
  4032. cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
  4033. { size_t oPos = 0;
  4034. size_t iPos = 0;
  4035. size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
  4036. dst, dstCapacity, &oPos,
  4037. src, srcSize, &iPos,
  4038. ZSTD_e_end);
  4039. /* Reset to the original values. */
  4040. cctx->requestedParams.inBufferMode = originalInBufferMode;
  4041. cctx->requestedParams.outBufferMode = originalOutBufferMode;
  4042. FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
  4043. if (result != 0) { /* compression not completed, due to lack of output space */
  4044. assert(oPos == dstCapacity);
  4045. RETURN_ERROR(dstSize_tooSmall, "");
  4046. }
  4047. assert(iPos == srcSize); /* all input is expected consumed */
  4048. return oPos;
  4049. }
  4050. }
/* Cursor tracking progress through a caller-provided ZSTD_Sequence array
 * across successive per-block copy passes. */
typedef struct {
    U32 idx;             /* Index in array of ZSTD_Sequence */
    U32 posInSequence;   /* Position within sequence at idx */
    size_t posInSrc;     /* Number of bytes given by sequences provided so far */
} ZSTD_sequencePosition;
  4056. /* Returns a ZSTD error code if sequence is not valid */
  4057. static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
  4058. size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
  4059. size_t offsetBound;
  4060. U32 windowSize = 1 << windowLog;
  4061. /* posInSrc represents the amount of data the the decoder would decode up to this point.
  4062. * As long as the amount of data decoded is less than or equal to window size, offsets may be
  4063. * larger than the total length of output decoded in order to reference the dict, even larger than
  4064. * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
  4065. */
  4066. offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
  4067. RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
  4068. RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
  4069. return 0;
  4070. }
  4071. /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
  4072. static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
  4073. U32 offCode = rawOffset + ZSTD_REP_MOVE;
  4074. U32 repCode = 0;
  4075. if (!ll0 && rawOffset == rep[0]) {
  4076. repCode = 1;
  4077. } else if (rawOffset == rep[1]) {
  4078. repCode = 2 - ll0;
  4079. } else if (rawOffset == rep[2]) {
  4080. repCode = 3 - ll0;
  4081. } else if (ll0 && rawOffset == rep[0] - 1) {
  4082. repCode = 3;
  4083. }
  4084. if (repCode) {
  4085. /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
  4086. offCode = repCode - 1;
  4087. }
  4088. return offCode;
  4089. }
  4090. /* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
  4091. * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
  4092. */
  4093. static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
  4094. const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
  4095. const void* src, size_t blockSize) {
  4096. U32 idx = seqPos->idx;
  4097. BYTE const* ip = (BYTE const*)(src);
  4098. const BYTE* const iend = ip + blockSize;
  4099. repcodes_t updatedRepcodes;
  4100. U32 dictSize;
  4101. U32 litLength;
  4102. U32 matchLength;
  4103. U32 ll0;
  4104. U32 offCode;
  4105. if (cctx->cdict) {
  4106. dictSize = (U32)cctx->cdict->dictContentSize;
  4107. } else if (cctx->prefixDict.dict) {
  4108. dictSize = (U32)cctx->prefixDict.dictSize;
  4109. } else {
  4110. dictSize = 0;
  4111. }
  4112. ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
  4113. for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
  4114. litLength = inSeqs[idx].litLength;
  4115. matchLength = inSeqs[idx].matchLength;
  4116. ll0 = litLength == 0;
  4117. offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
  4118. updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
  4119. DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
  4120. if (cctx->appliedParams.validateSequences) {
  4121. seqPos->posInSrc += litLength + matchLength;
  4122. FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
  4123. cctx->appliedParams.cParams.windowLog, dictSize,
  4124. cctx->appliedParams.cParams.minMatch),
  4125. "Sequence validation failed");
  4126. }
  4127. RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
  4128. "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
  4129. ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
  4130. ip += matchLength + litLength;
  4131. }
  4132. ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
  4133. if (inSeqs[idx].litLength) {
  4134. DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
  4135. ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
  4136. ip += inSeqs[idx].litLength;
  4137. seqPos->posInSrc += inSeqs[idx].litLength;
  4138. }
  4139. RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
  4140. seqPos->idx = idx+1;
  4141. return 0;
  4142. }
/* Returns the number of bytes to move the current read position back by. Only non-zero
 * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
 * went wrong.
 *
 * This function will attempt to scan through blockSize bytes represented by the sequences
 * in inSeqs, storing any (partial) sequences.
 *
 * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
 * avoid splitting a match, or to avoid splitting a match such that it would produce a match
 * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
 */
static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                                       const void* src, size_t blockSize) {
    U32 idx = seqPos->idx;
    U32 startPosInSequence = seqPos->posInSequence;                  /* offset into inSeqs[idx] where this block starts */
    U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;   /* running end-of-block marker, relative to inSeqs[idx] */
    size_t dictSize;
    BYTE const* ip = (BYTE const*)(src);
    BYTE const* iend = ip + blockSize;  /* May be adjusted if we decide to process fewer than blockSize bytes */
    repcodes_t updatedRepcodes;
    U32 bytesAdjustment = 0;            /* bytes handed back to the caller (not consumed this block) */
    U32 finalMatchSplit = 0;            /* set when the last match is split across blocks */
    U32 litLength;
    U32 matchLength;
    U32 rawOffset;
    U32 offCode;

    /* Dictionary content (if any) widens the range of valid offsets. */
    if (cctx->cdict) {
        dictSize = cctx->cdict->dictContentSize;
    } else if (cctx->prefixDict.dict) {
        dictSize = cctx->prefixDict.dictSize;
    } else {
        dictSize = 0;
    }
    DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
    DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
    while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
        const ZSTD_Sequence currSeq = inSeqs[idx];
        litLength = currSeq.litLength;
        matchLength = currSeq.matchLength;
        rawOffset = currSeq.offset;

        /* Modify the sequence depending on where endPosInSequence lies */
        if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
            /* The remainder of this sequence fits entirely in the block :
             * consume it, minus whatever was already consumed previously
             * (tracked by startPosInSequence). */
            if (startPosInSequence >= litLength) {
                startPosInSequence -= litLength;
                litLength = 0;
                matchLength -= startPosInSequence;
            } else {
                litLength -= startPosInSequence;
            }
            /* Move to the next sequence */
            endPosInSequence -= currSeq.litLength + currSeq.matchLength;
            startPosInSequence = 0;
            idx++;
        } else {
            /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
               does not reach the end of the match. So, we have to split the sequence */
            DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
                     currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
            if (endPosInSequence > litLength) {
                U32 firstHalfMatchLength;
                litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
                firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
                if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
                    /* Only ever split the match if it is larger than the block size */
                    U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
                    if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
                        /* Move the endPosInSequence backward so that it creates match of minMatch length */
                        endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        firstHalfMatchLength -= bytesAdjustment;
                    }
                    matchLength = firstHalfMatchLength;
                    /* Flag that we split the last match - after storing the sequence, exit the loop,
                       but keep the value of endPosInSequence */
                    finalMatchSplit = 1;
                } else {
                    /* Move the position in sequence backwards so that we don't split match, and break to store
                     * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
                     * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
                     * would cause the first half of the match to be too small
                     */
                    bytesAdjustment = endPosInSequence - currSeq.litLength;
                    endPosInSequence = currSeq.litLength;
                    break;
                }
            } else {
                /* This sequence ends inside the literals, break to store the last literals */
                break;
            }
        }
        /* Check if this offset can be represented with a repcode */
        {   U32 ll0 = (litLength == 0);
            offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
            updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
        }

        if (cctx->appliedParams.validateSequences) {
            seqPos->posInSrc += litLength + matchLength;
            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
                                                   cctx->appliedParams.cParams.windowLog, dictSize,
                                                   cctx->appliedParams.cParams.minMatch),
                                                   "Sequence validation failed");
        }
        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
        ip += matchLength + litLength;
    }
    DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
    assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
    seqPos->idx = idx;
    seqPos->posInSequence = endPosInSequence;
    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));

    /* Any bytes we decided not to consume shrink the effective block. */
    iend -= bytesAdjustment;
    if (ip != iend) {
        /* Store any last literals */
        U32 lastLLSize = (U32)(iend - ip);
        assert(ip <= iend);
        DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
        ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
        seqPos->posInSrc += lastLLSize;
    }
    return bytesAdjustment;
}
  4269. typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
  4270. const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
  4271. const void* src, size_t blockSize);
  4272. static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
  4273. ZSTD_sequenceCopier sequenceCopier = NULL;
  4274. assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
  4275. if (mode == ZSTD_sf_explicitBlockDelimiters) {
  4276. return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
  4277. } else if (mode == ZSTD_sf_noBlockDelimiters) {
  4278. return ZSTD_copySequencesToSeqStoreNoBlockDelim;
  4279. }
  4280. assert(sequenceCopier != NULL);
  4281. return sequenceCopier;
  4282. }
  4283. /* Compress, block-by-block, all of the sequences given.
  4284. *
  4285. * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error.
  4286. */
  4287. static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
  4288. void* dst, size_t dstCapacity,
  4289. const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
  4290. const void* src, size_t srcSize) {
  4291. size_t cSize = 0;
  4292. U32 lastBlock;
  4293. size_t blockSize;
  4294. size_t compressedSeqsSize;
  4295. size_t remaining = srcSize;
  4296. ZSTD_sequencePosition seqPos = {0, 0, 0};
  4297. BYTE const* ip = (BYTE const*)src;
  4298. BYTE* op = (BYTE*)dst;
  4299. ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
  4300. DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
  4301. /* Special case: empty frame */
  4302. if (remaining == 0) {
  4303. U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
  4304. RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
  4305. MEM_writeLE32(op, cBlockHeader24);
  4306. op += ZSTD_blockHeaderSize;
  4307. dstCapacity -= ZSTD_blockHeaderSize;
  4308. cSize += ZSTD_blockHeaderSize;
  4309. }
  4310. while (remaining) {
  4311. size_t cBlockSize;
  4312. size_t additionalByteAdjustment;
  4313. lastBlock = remaining <= cctx->blockSize;
  4314. blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
  4315. ZSTD_resetSeqStore(&cctx->seqStore);
  4316. DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
  4317. additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
  4318. FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
  4319. blockSize -= additionalByteAdjustment;
  4320. /* If blocks are too small, emit as a nocompress block */
  4321. if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
  4322. cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
  4323. FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
  4324. DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
  4325. cSize += cBlockSize;
  4326. ip += blockSize;
  4327. op += cBlockSize;
  4328. remaining -= blockSize;
  4329. dstCapacity -= cBlockSize;
  4330. continue;
  4331. }
  4332. compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
  4333. &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
  4334. &cctx->appliedParams,
  4335. op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
  4336. blockSize,
  4337. cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
  4338. cctx->bmi2);
  4339. FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
  4340. DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
  4341. if (!cctx->isFirstBlock &&
  4342. ZSTD_maybeRLE(&cctx->seqStore) &&
  4343. ZSTD_isRLE((BYTE const*)src, srcSize)) {
  4344. /* We don't want to emit our first block as a RLE even if it qualifies because
  4345. * doing so will cause the decoder (cli only) to throw a "should consume all input error."
  4346. * This is only an issue for zstd <= v1.4.3
  4347. */
  4348. compressedSeqsSize = 1;
  4349. }
  4350. if (compressedSeqsSize == 0) {
  4351. /* ZSTD_noCompressBlock writes the block header as well */
  4352. cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
  4353. FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
  4354. DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
  4355. } else if (compressedSeqsSize == 1) {
  4356. cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
  4357. FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
  4358. DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
  4359. } else {
  4360. U32 cBlockHeader;
  4361. /* Error checking and repcodes update */
  4362. ZSTD_confirmRepcodesAndEntropyTables(cctx);
  4363. if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
  4364. cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
  4365. /* Write block header into beginning of block*/
  4366. cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
  4367. MEM_writeLE24(op, cBlockHeader);
  4368. cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
  4369. DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
  4370. }
  4371. cSize += cBlockSize;
  4372. DEBUGLOG(4, "cSize running total: %zu", cSize);
  4373. if (lastBlock) {
  4374. break;
  4375. } else {
  4376. ip += blockSize;
  4377. op += cBlockSize;
  4378. remaining -= blockSize;
  4379. dstCapacity -= cBlockSize;
  4380. cctx->isFirstBlock = 0;
  4381. }
  4382. }
  4383. return cSize;
  4384. }
  4385. size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
  4386. const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
  4387. const void* src, size_t srcSize) {
  4388. BYTE* op = (BYTE*)dst;
  4389. size_t cSize = 0;
  4390. size_t compressedBlocksSize = 0;
  4391. size_t frameHeaderSize = 0;
  4392. /* Transparent initialization stage, same as compressStream2() */
  4393. DEBUGLOG(3, "ZSTD_compressSequences()");
  4394. assert(cctx != NULL);
  4395. FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
  4396. /* Begin writing output, starting with frame header */
  4397. frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
  4398. op += frameHeaderSize;
  4399. dstCapacity -= frameHeaderSize;
  4400. cSize += frameHeaderSize;
  4401. if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
  4402. XXH64_update(&cctx->xxhState, src, srcSize);
  4403. }
  4404. /* cSize includes block header size and compressed sequences size */
  4405. compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
  4406. op, dstCapacity,
  4407. inSeqs, inSeqsSize,
  4408. src, srcSize);
  4409. FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
  4410. cSize += compressedBlocksSize;
  4411. dstCapacity -= compressedBlocksSize;
  4412. if (cctx->appliedParams.fParams.checksumFlag) {
  4413. U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
  4414. RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
  4415. DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
  4416. MEM_writeLE32((char*)dst + cSize, checksum);
  4417. cSize += 4;
  4418. }
  4419. DEBUGLOG(3, "Final compressed size: %zu", cSize);
  4420. return cSize;
  4421. }
  4422. /*====== Finalize ======*/
  4423. /*! ZSTD_flushStream() :
  4424. * @return : amount of data remaining to flush */
  4425. size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
  4426. {
  4427. ZSTD_inBuffer input = { NULL, 0, 0 };
  4428. return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
  4429. }
  4430. size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
  4431. {
  4432. ZSTD_inBuffer input = { NULL, 0, 0 };
  4433. size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
  4434. FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
  4435. if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
  4436. /* single thread mode : attempt to calculate remaining to flush more precisely */
  4437. { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
  4438. size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
  4439. size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
  4440. DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
  4441. return toFlush;
  4442. }
  4443. }
  4444. /*-===== Pre-defined compression levels =====-*/
  4445. #define ZSTD_MAX_CLEVEL 22
  4446. int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
/*! ZSTD_minCLevel() :
 *  most negative (fastest) compression level supported;
 *  negative levels map onto the targetLength acceleration factor,
 *  hence the bound is -ZSTD_TARGETLENGTH_MAX */
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
/* Default compression parameters, indexed as [tableID][row] :
 *  - tableID (0..3) is derived from the estimated source size in
 *    ZSTD_getCParams_internal() : one extra point per satisfied bound
 *    (<= 256 KB, <= 128 KB, <= 16 KB), so 0 covers "large or unknown".
 *  - row is the (clamped) compression level; row 0 is the baseline that
 *    negative "fast" levels start from.
 * Column legend (ZSTD_compressionParameters field order) :
 *  W = windowLog, C = chainLog, H = hashLog, S = searchLog,
 *  L = minMatch, TL = targetLength, strat = strategy. */
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{   /* "default" - for any srcSize > 256 KB */
    /* W,  C,  H,  S,  L, TL, strat */
    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
},
{   /* for srcSize <= 256 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 128 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 16 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
},
};
  4554. static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
  4555. {
  4556. ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
  4557. switch (cParams.strategy) {
  4558. case ZSTD_fast:
  4559. case ZSTD_dfast:
  4560. break;
  4561. case ZSTD_greedy:
  4562. case ZSTD_lazy:
  4563. case ZSTD_lazy2:
  4564. cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
  4565. break;
  4566. case ZSTD_btlazy2:
  4567. case ZSTD_btopt:
  4568. case ZSTD_btultra:
  4569. case ZSTD_btultra2:
  4570. break;
  4571. }
  4572. return cParams;
  4573. }
  4574. static int ZSTD_dedicatedDictSearch_isSupported(
  4575. ZSTD_compressionParameters const* cParams)
  4576. {
  4577. return (cParams->strategy >= ZSTD_greedy) && (cParams->strategy <= ZSTD_lazy2);
  4578. }
  4579. /**
  4580. * Reverses the adjustment applied to cparams when enabling dedicated dict
  4581. * search. This is used to recover the params set to be used in the working
  4582. * context. (Otherwise, those tables would also grow.)
  4583. */
  4584. static void ZSTD_dedicatedDictSearch_revertCParams(
  4585. ZSTD_compressionParameters* cParams) {
  4586. switch (cParams->strategy) {
  4587. case ZSTD_fast:
  4588. case ZSTD_dfast:
  4589. break;
  4590. case ZSTD_greedy:
  4591. case ZSTD_lazy:
  4592. case ZSTD_lazy2:
  4593. cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
  4594. break;
  4595. case ZSTD_btlazy2:
  4596. case ZSTD_btopt:
  4597. case ZSTD_btultra:
  4598. case ZSTD_btultra2:
  4599. break;
  4600. }
  4601. }
  4602. static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
  4603. {
  4604. switch (mode) {
  4605. case ZSTD_cpm_unknown:
  4606. case ZSTD_cpm_noAttachDict:
  4607. case ZSTD_cpm_createCDict:
  4608. break;
  4609. case ZSTD_cpm_attachDict:
  4610. dictSize = 0;
  4611. break;
  4612. default:
  4613. assert(0);
  4614. break;
  4615. }
  4616. { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
  4617. size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
  4618. return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
  4619. }
  4620. }
  4621. /*! ZSTD_getCParams_internal() :
  4622. * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
  4623. * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
  4624. * Use dictSize == 0 for unknown or unused.
  4625. * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
  4626. static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
  4627. {
  4628. U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
  4629. U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
  4630. int row;
  4631. DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
  4632. /* row */
  4633. if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
  4634. else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
  4635. else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
  4636. else row = compressionLevel;
  4637. { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
  4638. /* acceleration factor */
  4639. if (compressionLevel < 0) {
  4640. int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
  4641. cp.targetLength = (unsigned)(-clampedCompressionLevel);
  4642. }
  4643. /* refine parameters based on srcSize & dictSize */
  4644. return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
  4645. }
  4646. }
  4647. /*! ZSTD_getCParams() :
  4648. * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
  4649. * Size values are optional, provide 0 if not known or unused */
  4650. ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
  4651. {
  4652. if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
  4653. return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
  4654. }
  4655. /*! ZSTD_getParams() :
  4656. * same idea as ZSTD_getCParams()
  4657. * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
  4658. * Fields of `ZSTD_frameParameters` are set to default values */
  4659. static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
  4660. ZSTD_parameters params;
  4661. ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
  4662. DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
  4663. ZSTD_memset(&params, 0, sizeof(params));
  4664. params.cParams = cParams;
  4665. params.fParams.contentSizeFlag = 1;
  4666. return params;
  4667. }
  4668. /*! ZSTD_getParams() :
  4669. * same idea as ZSTD_getCParams()
  4670. * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
  4671. * Fields of `ZSTD_frameParameters` are set to default values */
  4672. ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
  4673. if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
  4674. return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
  4675. }