zstd_compress.c

/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "../common/zstd_deps.h"  /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
#include "../common/mem.h"
#include "hist.h"                 /* HIST_countFast_wksp */
#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
#include "zstd_fast.h"
#include "zstd_double_fast.h"
#include "zstd_lazy.h"
#include "zstd_opt.h"
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"

/* ***************************************************************
*  Tuning parameters
*****************************************************************/
/*!
 * COMPRESS_HEAPMODE :
 * Select how the default compression function ZSTD_compress() allocates its context,
 * on stack (0, default), or into heap (1).
 * Note that functions with an explicit context, such as ZSTD_compressCCtx(), are unaffected.
 */
#ifndef ZSTD_COMPRESS_HEAPMODE
#  define ZSTD_COMPRESS_HEAPMODE 0
#endif

/*!
 * ZSTD_HASHLOG3_MAX :
 * Maximum size of the hash table dedicated to finding 3-byte matches,
 * in log format, aka 17 => 1 << 17 == 128Ki positions.
 * This structure is only used in zstd_opt.
 * Since allocation is centralized for all strategies, it has to be known here.
 * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3,
 * so that zstd_opt.c doesn't need to know about this constant.
 */
#ifndef ZSTD_HASHLOG3_MAX
#  define ZSTD_HASHLOG3_MAX 17
#endif

/*-*************************************
*  Helper functions
***************************************/
/* ZSTD_compressBound()
 * Note that the result from this function is only compatible with the "normal"
 * full-block strategy.
 * When there are a lot of small blocks due to frequent flushes in streaming mode,
 * the overhead of block headers can make the compressed data larger than the
 * return value of ZSTD_compressBound().
 */
size_t ZSTD_compressBound(size_t srcSize) {
    return ZSTD_COMPRESSBOUND(srcSize);
}
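
/* Illustrative usage sketch (editor's note, not part of the library) :
 * sizing a worst-case destination buffer for a one-shot compression call.
 * Assumes only the public API from zstd.h plus <stdlib.h>; `src`, `srcSize`
 * and compression level 3 are arbitrary example values.
 *
 *     size_t const dstCapacity = ZSTD_compressBound(srcSize);
 *     void* const dst = malloc(dstCapacity);
 *     if (dst != NULL) {
 *         size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
 *         if (!ZSTD_isError(cSize)) { ... use cSize bytes of compressed data ... }
 *         free(dst);
 *     }
 *
 * Sized this way, a single-frame full-block compression should never fail
 * with dstSize_tooSmall (subject to the streaming caveat described above).
 */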
/*-*************************************
*  Context memory management
***************************************/
struct ZSTD_CDict_s {
    const void* dictContent;
    size_t dictContentSize;
    ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
    U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
    ZSTD_cwksp workspace;
    ZSTD_matchState_t matchState;
    ZSTD_compressedBlockState_t cBlockState;
    ZSTD_customMem customMem;
    U32 dictID;
    int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
    ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
                                           * row-based matchfinder. Unless the cdict is reloaded, we will use
                                           * the same greedy/lazy matchfinder at compression time.
                                           */
};  /* typedef'd to ZSTD_CDict within "zstd.h" */

ZSTD_CCtx* ZSTD_createCCtx(void)
{
    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
}

static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
    assert(cctx != NULL);
    ZSTD_memset(cctx, 0, sizeof(*cctx));
    cctx->customMem = memManager;
    cctx->bmi2 = ZSTD_cpuSupportsBmi2();
    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
        assert(!ZSTD_isError(err));
        (void)err;
    }
}

ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
    ZSTD_STATIC_ASSERT(zcss_init==0);
    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
        if (!cctx) return NULL;
        ZSTD_initCCtx(cctx, customMem);
        return cctx;
    }
}
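
/* Illustrative usage sketch (editor's note, not part of the library) :
 * routing allocations through a caller-supplied ZSTD_customMem.
 * `myAlloc` and `myFree` are hypothetical user functions (assuming
 * <stdlib.h>); either both members are set, or both are NULL, otherwise
 * the XOR check above makes ZSTD_createCCtx_advanced() return NULL.
 *
 *     static void* myAlloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
 *     static void  myFree (void* opaque, void* addr)  { (void)opaque; free(addr); }
 *
 *     ZSTD_customMem const cmem = { myAlloc, myFree, NULL };
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
 */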
ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
{
    ZSTD_cwksp ws;
    ZSTD_CCtx* cctx;
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);

    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
    if (cctx == NULL) return NULL;

    ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
    ZSTD_cwksp_move(&cctx->workspace, &ws);
    cctx->staticSize = workspaceSize;

    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
    if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    return cctx;
}
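
/* Illustrative usage sketch (editor's note, not part of the library) :
 * placing a CCtx inside a caller-owned, 8-byte-aligned workspace.
 * The 1 MB size is an arbitrary example; ZSTD_estimateCCtxSize() reports
 * the actual requirement. The workspace must outlive the cctx, and the
 * cctx must not be passed to ZSTD_freeCCtx().
 *
 *     union { char bytes[1 << 20]; U64 align8; } wksp;
 *     ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp.bytes, sizeof(wksp.bytes));
 *     assert(cctx != NULL);   NULL means the workspace was too small or misaligned
 */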
/**
 * Clears and frees all of the dictionaries in the CCtx.
 */
static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
{
    ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
    ZSTD_freeCDict(cctx->localDict.cdict);
    ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
    cctx->cdict = NULL;
}

static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
{
    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
    return bufferSize + cdictSize;
}

static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
    assert(cctx != NULL);
    assert(cctx->staticSize == 0);
    ZSTD_clearAllDicts(cctx);
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
#endif
    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
}

size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return 0;   /* support free on NULL */
    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                    "not compatible with static CCtx");
    {   int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
        ZSTD_freeCCtxContent(cctx);
        if (!cctxInWorkspace) {
            ZSTD_customFree(cctx, cctx->customMem);
        }
    }
    return 0;
}

static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    return ZSTDMT_sizeof_CCtx(cctx->mtctx);
#else
    (void)cctx;
    return 0;
#endif
}

size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return 0;   /* support sizeof on NULL */
    /* cctx may be in the workspace */
    return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
         + ZSTD_cwksp_sizeof(&cctx->workspace)
         + ZSTD_sizeof_localDict(cctx->localDict)
         + ZSTD_sizeof_mtctx(cctx);
}

size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
{
    return ZSTD_sizeof_CCtx(zcs);   /* same object */
}

/* private API call, for dictBuilder only */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }

/* Returns true if the strategy supports using a row based matchfinder */
static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
    return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
}

/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
 * for this compression.
 */
static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) {
    assert(mode != ZSTD_ps_auto);
    return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
}

/* Returns row matchfinder usage given an initial mode and cParams */
static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode,
                                                         const ZSTD_compressionParameters* const cParams) {
#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON)
    int const kHasSIMD128 = 1;
#else
    int const kHasSIMD128 = 0;
#endif
    if (mode != ZSTD_ps_auto) return mode; /* if explicitly requested, honor the request : even without SIMD, the row matchfinder will still be used */
    mode = ZSTD_ps_disable;
    if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
    if (kHasSIMD128) {
        if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
    } else {
        if (cParams->windowLog > 17) mode = ZSTD_ps_enable;
    }
    return mode;
}

/* Returns block splitter usage (generally speaking, block splitting is enabled when using slower/stronger compression modes) */
static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
                                                        const ZSTD_compressionParameters* const cParams) {
    if (mode != ZSTD_ps_auto) return mode;
    return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
}

/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
                                   const ZSTD_paramSwitch_e useRowMatchFinder,
                                   const U32 forDDSDict) {
    assert(useRowMatchFinder != ZSTD_ps_auto);
    /* We should always allocate a chaintable when allocating a matchstate for a DDS dictionary.
     * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder.
     */
    return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
}

/* Returns ZSTD_ps_enable if compression parameters are such that we should
 * enable long distance matching (wlog >= 27, strategy >= btopt).
 * Returns ZSTD_ps_disable otherwise.
 */
static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
                                                const ZSTD_compressionParameters* const cParams) {
    if (mode != ZSTD_ps_auto) return mode;
    return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
}

static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
        ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params cctxParams;
    /* should not matter, as all cParams are presumed properly defined */
    ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
    cctxParams.cParams = cParams;

    /* Adjust advanced params according to cParams */
    cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
    if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
        assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
        assert(cctxParams.ldmParams.hashRateLog < 32);
    }
    cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
    cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
    assert(!ZSTD_checkCParams(cParams));
    return cctxParams;
}

static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
        ZSTD_customMem customMem)
{
    ZSTD_CCtx_params* params;
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
    params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
            sizeof(ZSTD_CCtx_params), customMem);
    if (!params) { return NULL; }
    ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
    params->customMem = customMem;
    return params;
}

ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
{
    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
}

size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
{
    if (params == NULL) { return 0; }
    ZSTD_customFree(params, params->customMem);
    return 0;
}

size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
{
    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
}

size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->compressionLevel = compressionLevel;
    cctxParams->fParams.contentSizeFlag = 1;
    return 0;
}
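
/* Illustrative usage sketch (editor's note, not part of the library) :
 * building a reusable parameter set with the *_CCtxParams API. Level 19
 * and the checksum flag are arbitrary example values; the set would then
 * be applied with ZSTD_CCtx_setParametersUsingCCtxParams() (declared in
 * the experimental section of zstd.h).
 *
 *     ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
 *     ZSTD_CCtxParams_init(params, 19);
 *     ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
 *     ... apply to a cctx, compress, then ZSTD_freeCCtxParams(params) ...
 */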
#define ZSTD_NO_CLEVEL 0

/**
 * Initializes the cctxParams from params and compressionLevel.
 * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
 */
static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
{
    assert(!ZSTD_checkCParams(params->cParams));
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->cParams = params->cParams;
    cctxParams->fParams = params->fParams;
    /* Should not matter, as all cParams are presumed properly defined.
     * But, set it for tracing anyway.
     */
    cctxParams->compressionLevel = compressionLevel;
    cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
    cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
    cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
    DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
             cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
}

size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
    return 0;
}

/**
 * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
 * @param params Validated zstd parameters.
 */
static void ZSTD_CCtxParams_setZstdParams(
        ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
{
    assert(!ZSTD_checkCParams(params->cParams));
    cctxParams->cParams = params->cParams;
    cctxParams->fParams = params->fParams;
    /* Should not matter, as all cParams are presumed properly defined.
     * But, set it for tracing anyway.
     */
    cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
}

ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
{
    ZSTD_bounds bounds = { 0, 0, 0 };

    switch(param)
    {
    case ZSTD_c_compressionLevel:
        bounds.lowerBound = ZSTD_minCLevel();
        bounds.upperBound = ZSTD_maxCLevel();
        return bounds;
    case ZSTD_c_windowLog:
        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
        return bounds;
    case ZSTD_c_hashLog:
        bounds.lowerBound = ZSTD_HASHLOG_MIN;
        bounds.upperBound = ZSTD_HASHLOG_MAX;
        return bounds;
    case ZSTD_c_chainLog:
        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
        bounds.upperBound = ZSTD_CHAINLOG_MAX;
        return bounds;
    case ZSTD_c_searchLog:
        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
        return bounds;
    case ZSTD_c_minMatch:
        bounds.lowerBound = ZSTD_MINMATCH_MIN;
        bounds.upperBound = ZSTD_MINMATCH_MAX;
        return bounds;
    case ZSTD_c_targetLength:
        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
        return bounds;
    case ZSTD_c_strategy:
        bounds.lowerBound = ZSTD_STRATEGY_MIN;
        bounds.upperBound = ZSTD_STRATEGY_MAX;
        return bounds;
    case ZSTD_c_contentSizeFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_checksumFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_dictIDFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_nbWorkers:
        bounds.lowerBound = 0;
#ifdef ZSTD_MULTITHREAD
        bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
#else
        bounds.upperBound = 0;
#endif
        return bounds;
    case ZSTD_c_jobSize:
        bounds.lowerBound = 0;
#ifdef ZSTD_MULTITHREAD
        bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
#else
        bounds.upperBound = 0;
#endif
        return bounds;
    case ZSTD_c_overlapLog:
#ifdef ZSTD_MULTITHREAD
        bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
        bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
#else
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
#endif
        return bounds;
    case ZSTD_c_enableDedicatedDictSearch:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_enableLongDistanceMatching:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_ldmHashLog:
        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
        return bounds;
    case ZSTD_c_ldmMinMatch:
        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
        return bounds;
    case ZSTD_c_ldmBucketSizeLog:
        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
        return bounds;
    case ZSTD_c_ldmHashRateLog:
        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
        return bounds;

    /* experimental parameters */
    case ZSTD_c_rsyncable:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_forceMaxWindow :
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_format:
        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
        bounds.lowerBound = ZSTD_f_zstd1;
        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;
    case ZSTD_c_forceAttachDict:
        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
        bounds.lowerBound = ZSTD_dictDefaultAttach;
        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;
    case ZSTD_c_literalCompressionMode:
        ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable);
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;
    case ZSTD_c_targetCBlockSize:
        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
        return bounds;
    case ZSTD_c_srcSizeHint:
        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
        return bounds;
    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
        bounds.lowerBound = (int)ZSTD_bm_buffered;
        bounds.upperBound = (int)ZSTD_bm_stable;
        return bounds;
    case ZSTD_c_blockDelimiters:
        bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
        bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
        return bounds;
    case ZSTD_c_validateSequences:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_useBlockSplitter:
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;
    case ZSTD_c_useRowMatchFinder:
        bounds.lowerBound = (int)ZSTD_ps_auto;
        bounds.upperBound = (int)ZSTD_ps_disable;
        return bounds;
    case ZSTD_c_deterministicRefPrefix:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    default:
        bounds.error = ERROR(parameter_unsupported);
        return bounds;
    }
}

/* ZSTD_cParam_clampBounds:
 * Clamps the value into the bounded range.
 */
static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
{
    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
    if (ZSTD_isError(bounds.error)) return bounds.error;
    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
    if (*value > bounds.upperBound) *value = bounds.upperBound;
    return 0;
}
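
/* Illustrative usage sketch (editor's note, not part of the library) :
 * what ZSTD_cParam_clampBounds() does for a caller, shown with the public
 * ZSTD_cParam_getBounds() API; `requested` is a hypothetical user value.
 *
 *     ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
 *     if (!ZSTD_isError(b.error)) {
 *         int level = requested;
 *         if (level < b.lowerBound) level = b.lowerBound;
 *         if (level > b.upperBound) level = b.upperBound;
 *     }
 */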
#define BOUNDCHECK(cParam, val) {                           \
    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val),  \
                    parameter_outOfBound, "Param out of bounds"); \
}

static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
{
    switch(param)
    {
    case ZSTD_c_compressionLevel:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
        return 1;

    case ZSTD_c_format:
    case ZSTD_c_windowLog:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow :
    case ZSTD_c_nbWorkers:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableDedicatedDictSearch:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_targetCBlockSize:
    case ZSTD_c_srcSizeHint:
    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
    case ZSTD_c_blockDelimiters:
    case ZSTD_c_validateSequences:
    case ZSTD_c_useBlockSplitter:
    case ZSTD_c_useRowMatchFinder:
    case ZSTD_c_deterministicRefPrefix:
    default:
        return 0;
    }
}

size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
    if (cctx->streamStage != zcss_init) {
        if (ZSTD_isUpdateAuthorized(param)) {
            cctx->cParamsChanged = 1;
        } else {
            RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
    }   }

    switch(param)
    {
    case ZSTD_c_nbWorkers:
        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
                        "MT not compatible with static alloc");
        break;

    case ZSTD_c_compressionLevel:
    case ZSTD_c_windowLog:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_format:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableDedicatedDictSearch:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_targetCBlockSize:
    case ZSTD_c_srcSizeHint:
    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
    case ZSTD_c_blockDelimiters:
    case ZSTD_c_validateSequences:
    case ZSTD_c_useBlockSplitter:
    case ZSTD_c_useRowMatchFinder:
    case ZSTD_c_deterministicRefPrefix:
        break;

    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
}
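
/* Illustrative usage sketch (editor's note, not part of the library) :
 * the typical advanced-API flow served by ZSTD_CCtx_setParameter().
 * Parameters set this way persist across frames until reset; the values
 * shown are arbitrary examples.
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 5);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
 *     {   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
 *         ... check with ZSTD_isError(cSize) ...
 *     }
 *     ZSTD_freeCCtx(cctx);
 */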
size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
                                    ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
    switch(param)
    {
    case ZSTD_c_format :
        BOUNDCHECK(ZSTD_c_format, value);
        CCtxParams->format = (ZSTD_format_e)value;
        return (size_t)CCtxParams->format;

    case ZSTD_c_compressionLevel : {
        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
        if (value == 0)
            CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
        else
            CCtxParams->compressionLevel = value;
        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
        return 0;   /* return type (size_t) cannot represent negative values */
    }

    case ZSTD_c_windowLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_windowLog, value);
        CCtxParams->cParams.windowLog = (U32)value;
        return CCtxParams->cParams.windowLog;

    case ZSTD_c_hashLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_hashLog, value);
        CCtxParams->cParams.hashLog = (U32)value;
        return CCtxParams->cParams.hashLog;

    case ZSTD_c_chainLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_chainLog, value);
        CCtxParams->cParams.chainLog = (U32)value;
        return CCtxParams->cParams.chainLog;

    case ZSTD_c_searchLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_searchLog, value);
        CCtxParams->cParams.searchLog = (U32)value;
        return (size_t)value;

    case ZSTD_c_minMatch :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_minMatch, value);
        CCtxParams->cParams.minMatch = value;
        return CCtxParams->cParams.minMatch;

    case ZSTD_c_targetLength :
        BOUNDCHECK(ZSTD_c_targetLength, value);
        CCtxParams->cParams.targetLength = value;
        return CCtxParams->cParams.targetLength;

    case ZSTD_c_strategy :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_strategy, value);
        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
        return (size_t)CCtxParams->cParams.strategy;

    case ZSTD_c_contentSizeFlag :
        /* Content size written in frame header _when known_ (default:1) */
        DEBUGLOG(4, "set content size flag = %u", (value!=0));
        CCtxParams->fParams.contentSizeFlag = value != 0;
        return CCtxParams->fParams.contentSizeFlag;

    case ZSTD_c_checksumFlag :
        /* A 32-bit content checksum will be calculated and written at end of frame (default:0) */
        CCtxParams->fParams.checksumFlag = value != 0;
        return CCtxParams->fParams.checksumFlag;

    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
        CCtxParams->fParams.noDictIDFlag = !value;
        return !CCtxParams->fParams.noDictIDFlag;

    case ZSTD_c_forceMaxWindow :
        CCtxParams->forceWindow = (value != 0);
        return CCtxParams->forceWindow;

    case ZSTD_c_forceAttachDict : {
        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
        CCtxParams->attachDictPref = pref;
        return CCtxParams->attachDictPref;
    }

    case ZSTD_c_literalCompressionMode : {
        const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value;
        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
        CCtxParams->literalCompressionMode = lcm;
        return CCtxParams->literalCompressionMode;
    }

    case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;
#else
        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
        CCtxParams->nbWorkers = value;
        return CCtxParams->nbWorkers;
#endif

    case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;
#else
        /* Adjust to the minimum non-default value. */
        if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
            value = ZSTDMT_JOBSIZE_MIN;
        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
        assert(value >= 0);
        CCtxParams->jobSize = value;
        return CCtxParams->jobSize;
#endif

    case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;
#else
        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
        CCtxParams->overlapLog = value;
        return CCtxParams->overlapLog;
#endif

    case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;
#else
        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
        CCtxParams->rsyncable = value;
        return CCtxParams->rsyncable;
#endif

    case ZSTD_c_enableDedicatedDictSearch :
        CCtxParams->enableDedicatedDictSearch = (value!=0);
        return CCtxParams->enableDedicatedDictSearch;

    case ZSTD_c_enableLongDistanceMatching :
        CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
        return CCtxParams->ldmParams.enableLdm;

    case ZSTD_c_ldmHashLog :
        if (value!=0)   /* 0 ==> auto */
            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
        CCtxParams->ldmParams.hashLog = value;
        return CCtxParams->ldmParams.hashLog;

    case ZSTD_c_ldmMinMatch :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
        CCtxParams->ldmParams.minMatchLength = value;
        return CCtxParams->ldmParams.minMatchLength;

    case ZSTD_c_ldmBucketSizeLog :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
        CCtxParams->ldmParams.bucketSizeLog = value;
        return CCtxParams->ldmParams.bucketSizeLog;

    case ZSTD_c_ldmHashRateLog :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
        CCtxParams->ldmParams.hashRateLog = value;
        return CCtxParams->ldmParams.hashRateLog;

    case ZSTD_c_targetCBlockSize :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
        CCtxParams->targetCBlockSize = value;
        return CCtxParams->targetCBlockSize;

    case ZSTD_c_srcSizeHint :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_srcSizeHint, value);
        CCtxParams->srcSizeHint = value;
        return CCtxParams->srcSizeHint;

    case ZSTD_c_stableInBuffer:
        BOUNDCHECK(ZSTD_c_stableInBuffer, value);
        CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
        return CCtxParams->inBufferMode;

    case ZSTD_c_stableOutBuffer:
        BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
        CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
        return CCtxParams->outBufferMode;

    case ZSTD_c_blockDelimiters:
        BOUNDCHECK(ZSTD_c_blockDelimiters, value);
        CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
        return CCtxParams->blockDelimiters;

    case ZSTD_c_validateSequences:
        BOUNDCHECK(ZSTD_c_validateSequences, value);
        CCtxParams->validateSequences = value;
        return CCtxParams->validateSequences;

    case ZSTD_c_useBlockSplitter:
        BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
        CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value;
        return CCtxParams->useBlockSplitter;

    case ZSTD_c_useRowMatchFinder:
        BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
        CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
        return CCtxParams->useRowMatchFinder;

    case ZSTD_c_deterministicRefPrefix:
        BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
        CCtxParams->deterministicRefPrefix = !!value;
        return CCtxParams->deterministicRefPrefix;

    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
}
  804. size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
  805. {
  806. return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
  807. }
  808. size_t ZSTD_CCtxParams_getParameter(
  809. ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
  810. {
  811. switch(param)
  812. {
  813. case ZSTD_c_format :
  814. *value = CCtxParams->format;
  815. break;
  816. case ZSTD_c_compressionLevel :
  817. *value = CCtxParams->compressionLevel;
  818. break;
  819. case ZSTD_c_windowLog :
  820. *value = (int)CCtxParams->cParams.windowLog;
  821. break;
  822. case ZSTD_c_hashLog :
  823. *value = (int)CCtxParams->cParams.hashLog;
  824. break;
  825. case ZSTD_c_chainLog :
  826. *value = (int)CCtxParams->cParams.chainLog;
  827. break;
  828. case ZSTD_c_searchLog :
  829. *value = CCtxParams->cParams.searchLog;
  830. break;
  831. case ZSTD_c_minMatch :
  832. *value = CCtxParams->cParams.minMatch;
  833. break;
  834. case ZSTD_c_targetLength :
  835. *value = CCtxParams->cParams.targetLength;
  836. break;
  837. case ZSTD_c_strategy :
  838. *value = (unsigned)CCtxParams->cParams.strategy;
  839. break;
  840. case ZSTD_c_contentSizeFlag :
  841. *value = CCtxParams->fParams.contentSizeFlag;
  842. break;
  843. case ZSTD_c_checksumFlag :
  844. *value = CCtxParams->fParams.checksumFlag;
  845. break;
  846. case ZSTD_c_dictIDFlag :
  847. *value = !CCtxParams->fParams.noDictIDFlag;
  848. break;
  849. case ZSTD_c_forceMaxWindow :
  850. *value = CCtxParams->forceWindow;
  851. break;
  852. case ZSTD_c_forceAttachDict :
  853. *value = CCtxParams->attachDictPref;
  854. break;
  855. case ZSTD_c_literalCompressionMode :
  856. *value = CCtxParams->literalCompressionMode;
  857. break;
  858. case ZSTD_c_nbWorkers :
  859. #ifndef ZSTD_MULTITHREAD
  860. assert(CCtxParams->nbWorkers == 0);
  861. #endif
  862. *value = CCtxParams->nbWorkers;
  863. break;
  864. case ZSTD_c_jobSize :
  865. #ifndef ZSTD_MULTITHREAD
  866. RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
  867. #else
  868. assert(CCtxParams->jobSize <= INT_MAX);
  869. *value = (int)CCtxParams->jobSize;
  870. break;
  871. #endif
  872. case ZSTD_c_overlapLog :
  873. #ifndef ZSTD_MULTITHREAD
  874. RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
  875. #else
  876. *value = CCtxParams->overlapLog;
  877. break;
  878. #endif
  879. case ZSTD_c_rsyncable :
  880. #ifndef ZSTD_MULTITHREAD
  881. RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
  882. #else
  883. *value = CCtxParams->rsyncable;
  884. break;
  885. #endif
  886. case ZSTD_c_enableDedicatedDictSearch :
  887. *value = CCtxParams->enableDedicatedDictSearch;
  888. break;
  889. case ZSTD_c_enableLongDistanceMatching :
  890. *value = CCtxParams->ldmParams.enableLdm;
  891. break;
  892. case ZSTD_c_ldmHashLog :
  893. *value = CCtxParams->ldmParams.hashLog;
  894. break;
  895. case ZSTD_c_ldmMinMatch :
  896. *value = CCtxParams->ldmParams.minMatchLength;
  897. break;
  898. case ZSTD_c_ldmBucketSizeLog :
  899. *value = CCtxParams->ldmParams.bucketSizeLog;
  900. break;
  901. case ZSTD_c_ldmHashRateLog :
  902. *value = CCtxParams->ldmParams.hashRateLog;
  903. break;
  904. case ZSTD_c_targetCBlockSize :
  905. *value = (int)CCtxParams->targetCBlockSize;
  906. break;
  907. case ZSTD_c_srcSizeHint :
  908. *value = (int)CCtxParams->srcSizeHint;
  909. break;
  910. case ZSTD_c_stableInBuffer :
  911. *value = (int)CCtxParams->inBufferMode;
  912. break;
  913. case ZSTD_c_stableOutBuffer :
  914. *value = (int)CCtxParams->outBufferMode;
  915. break;
  916. case ZSTD_c_blockDelimiters :
  917. *value = (int)CCtxParams->blockDelimiters;
  918. break;
  919. case ZSTD_c_validateSequences :
  920. *value = (int)CCtxParams->validateSequences;
  921. break;
  922. case ZSTD_c_useBlockSplitter :
  923. *value = (int)CCtxParams->useBlockSplitter;
  924. break;
  925. case ZSTD_c_useRowMatchFinder :
  926. *value = (int)CCtxParams->useRowMatchFinder;
  927. break;
  928. case ZSTD_c_deterministicRefPrefix:
  929. *value = (int)CCtxParams->deterministicRefPrefix;
  930. break;
  931. default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
  932. }
  933. return 0;
  934. }
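
/* Illustrative sketch (not part of the library) : a set/get round-trip
 * through the wrappers above. All identifiers are public zstd API;
 * error handling is elided for brevity.
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     int level;
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
 *     ZSTD_CCtx_getParameter(cctx, ZSTD_c_compressionLevel, &level);
 *     assert(level == 19);
 *     ZSTD_freeCCtx(cctx);
 */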
/** ZSTD_CCtx_setParametersUsingCCtxParams() :
 *  just applies `params` into `cctx`;
 *  no action is performed, parameters are merely stored.
 *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
 *  This is possible even if a compression is ongoing,
 *  in which case the new parameters will be applied on the fly,
 *  starting with the next compression job.
 */
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "The context is in the wrong stage!");
    RETURN_ERROR_IF(cctx->cdict, stage_wrong,
                    "Can't override parameters with cdict attached (some must "
                    "be inherited from the cdict).");

    cctx->requestedParams = *params;
    return 0;
}

size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't set pledgedSrcSize when not in init stage.");
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    return 0;
}
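
/* Illustrative sketch (not part of the library) : pledging the source size
 * before compression lets the frame header record the content size and lets
 * parameter adjustment shrink tables for small inputs. `cctx`, `src`, and
 * `dst` are assumed caller-side variables; error checks are elided.
 *
 *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
 *     ZSTD_CCtx_setPledgedSrcSize(cctx, srcSize);
 *     ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
 */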
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
        int const compressionLevel,
        size_t const dictSize);
static int ZSTD_dedicatedDictSearch_isSupported(
        const ZSTD_compressionParameters* cParams);
static void ZSTD_dedicatedDictSearch_revertCParams(
        ZSTD_compressionParameters* cParams);

/**
 * Initializes the local dict using the requested parameters.
 * NOTE: This does not use the pledged src size, because it may be used for more
 * than one compression.
 */
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
    ZSTD_localDict* const dl = &cctx->localDict;
    if (dl->dict == NULL) {
        /* No local dictionary. */
        assert(dl->dictBuffer == NULL);
        assert(dl->cdict == NULL);
        assert(dl->dictSize == 0);
        return 0;
    }
    if (dl->cdict != NULL) {
        assert(cctx->cdict == dl->cdict);
        /* Local dictionary already initialized. */
        return 0;
    }
    assert(dl->dictSize > 0);
    assert(cctx->cdict == NULL);
    assert(cctx->prefixDict.dict == NULL);

    dl->cdict = ZSTD_createCDict_advanced2(
            dl->dict,
            dl->dictSize,
            ZSTD_dlm_byRef,
            dl->dictContentType,
            &cctx->requestedParams,
            cctx->customMem);
    RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
    cctx->cdict = dl->cdict;
    return 0;
}

size_t ZSTD_CCtx_loadDictionary_advanced(
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't load a dictionary when ctx is not in init stage.");
    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
        return 0;
    if (dictLoadMethod == ZSTD_dlm_byRef) {
        cctx->localDict.dict = dict;
    } else {
        void* dictBuffer;
        RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                        "no malloc for static CCtx");
        dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
        RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
        ZSTD_memcpy(dictBuffer, dict, dictSize);
        cctx->localDict.dictBuffer = dictBuffer;
        cctx->localDict.dict = dictBuffer;
    }
    cctx->localDict.dictSize = dictSize;
    cctx->localDict.dictContentType = dictContentType;
    return 0;
}

size_t ZSTD_CCtx_loadDictionary_byReference(
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}

size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}
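
/* Illustrative sketch (not part of the library) : the two public wrappers
 * above differ only in lifetime. With the _byReference variant, `dictBuffer`
 * (an assumed caller-side buffer) must outlive every compression that uses it.
 *
 *     ZSTD_CCtx_loadDictionary(cctx, dictBuffer, dictSize);             // copies content
 *     ZSTD_CCtx_loadDictionary_byReference(cctx, dictBuffer, dictSize); // borrows it
 */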
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a dict when ctx not in init stage.");
    /* Free the existing local cdict (if any) to save memory. */
    ZSTD_clearAllDicts(cctx);
    cctx->cdict = cdict;
    return 0;
}

size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a pool when ctx not in init stage.");
    cctx->pool = pool;
    return 0;
}

size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
}

size_t ZSTD_CCtx_refPrefix_advanced(
        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a prefix when ctx not in init stage.");
    ZSTD_clearAllDicts(cctx);
    if (prefix != NULL && prefixSize > 0) {
        cctx->prefixDict.dict = prefix;
        cctx->prefixDict.dictSize = prefixSize;
        cctx->prefixDict.dictContentType = dictContentType;
    }
    return 0;
}
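
/* Illustrative sketch (not part of the library) : a prefix is a single-use,
 * zero-copy dictionary; it must be re-referenced before each frame, and the
 * decompressor must receive the same prefix via ZSTD_DCtx_refPrefix().
 * `prevChunk` and `chunk` are assumed caller-side buffers.
 *
 *     ZSTD_CCtx_refPrefix(cctx, prevChunk, prevChunkSize);
 *     ZSTD_compress2(cctx, dst, dstCapacity, chunk, chunkSize);
 *     // the prefix reference is dropped once this frame completes
 */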
/*! ZSTD_CCtx_reset() :
 *  Also dumps dictionary */
size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
{
    if ( (reset == ZSTD_reset_session_only)
      || (reset == ZSTD_reset_session_and_parameters) ) {
        cctx->streamStage = zcss_init;
        cctx->pledgedSrcSizePlusOne = 0;
    }
    if ( (reset == ZSTD_reset_parameters)
      || (reset == ZSTD_reset_session_and_parameters) ) {
        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                        "Can't reset parameters only when not in init stage.");
        ZSTD_clearAllDicts(cctx);
        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
    }
    return 0;
}
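
/* Illustrative sketch (not part of the library) : the directive families
 * compose. A session-only reset keeps parameters and dictionary for the
 * next frame; resetting parameters additionally drops any loaded dictionary.
 *
 *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);            // keep params + dict
 *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);  // back to defaults
 */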
/** ZSTD_checkCParams() :
 *  controls that CParam values remain within authorized range.
 * @return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
    return 0;
}
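
/* Illustrative sketch (not part of the library) : validating hand-tuned
 * parameters before use. ZSTD_getCParams(), ZSTD_checkCParams() and
 * ZSTD_adjustCParams() are public (experimental) API; `srcSize` is an
 * assumed caller-side variable.
 *
 *     ZSTD_compressionParameters cp = ZSTD_getCParams(19, srcSize, 0);
 *     cp.windowLog = 24;                        // manual override
 *     if (ZSTD_isError(ZSTD_checkCParams(cp)))  // out-of-range override?
 *         cp = ZSTD_adjustCParams(cp, srcSize, 0);  // clamp + re-adjust
 */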
/** ZSTD_clampCParams() :
 *  make CParam values within valid range.
 * @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
#   define CLAMP_TYPE(cParam, val, type) {                                \
        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
    }
#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
    return cParams;
}

/** ZSTD_cycleLog() :
 *  condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
{
    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
    return hashLog - btScale;
}

/** ZSTD_dictAndWindowLog() :
 * Returns an adjusted window log that is large enough to fit the source and the dictionary.
 * The zstd format says that the entire dictionary is valid if one byte of the dictionary
 * is within the window. So the hashLog and chainLog should be large enough to reference both
 * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
 * the hashLog and windowLog.
 * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
 */
static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
{
    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
    /* No dictionary ==> No change */
    if (dictSize == 0) {
        return windowLog;
    }
    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN);   /* Handled in ZSTD_adjustCParams_internal() */
    {   U64 const windowSize = 1ULL << windowLog;
        U64 const dictAndWindowSize = dictSize + windowSize;
        /* If the window size is already large enough to fit both the source and the dictionary
         * then just use the window size. Otherwise adjust so that it fits the dictionary and
         * the window.
         */
        if (windowSize >= dictSize + srcSize) {
            return windowLog;   /* Window size large enough already */
        } else if (dictAndWindowSize >= maxWindowSize) {
            return ZSTD_WINDOWLOG_MAX;   /* Larger than max window log */
        } else {
            return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
        }
    }
}
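
/* Worked example (illustrative, not part of the library) :
 * windowLog=20 (1 MB window), dictSize=512 KB, srcSize=2 MB.
 * The window cannot fit dict+src (1 MB < 2.5 MB), and dict+window
 * (1.5 MB) is below the maximum window, so the function returns
 * ZSTD_highbit32(1572863) + 1 = 20 + 1 = 21 : a 2 MB window that keeps
 * the whole dictionary referenceable alongside the sliding window.
 */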
/** ZSTD_adjustCParams_internal() :
 *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
 *  mostly downsize to reduce memory consumption and initialization latency.
 * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
 * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
 *  note : `srcSize==0` means 0!
 *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
                            unsigned long long srcSize,
                            size_t dictSize,
                            ZSTD_cParamMode_e mode)
{
    const U64 minSrcSize = 513; /* (1<<9) + 1 */
    const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
    assert(ZSTD_checkCParams(cPar)==0);

    switch (mode) {
    case ZSTD_cpm_unknown:
    case ZSTD_cpm_noAttachDict:
        /* If we don't know the source size, don't make any
         * assumptions about it. We will already have selected
         * smaller parameters if a dictionary is in use.
         */
        break;
    case ZSTD_cpm_createCDict:
        /* Assume a small source size when creating a dictionary
         * with an unknown source size.
         */
        if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            srcSize = minSrcSize;
        break;
    case ZSTD_cpm_attachDict:
        /* Dictionary has its own dedicated parameters which have
         * already been selected. We are selecting parameters
         * for only the source.
         */
        dictSize = 0;
        break;
    default:
        assert(0);
        break;
    }

    /* resize windowLog if input is small enough, to use less memory */
    if ( (srcSize < maxWindowResize)
      && (dictSize < maxWindowResize) ) {
        U32 const tSize = (U32)(srcSize + dictSize);
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
                            ZSTD_highbit32(tSize-1) + 1;
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
    }
    if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
        U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
        if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
        if (cycleLog > dictAndWindowLog)
            cPar.chainLog -= (cycleLog - dictAndWindowLog);
    }

    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */

    return cPar;
}

ZSTD_compressionParameters
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
                   unsigned long long srcSize,
                   size_t dictSize)
{
    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
    if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
}
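
/* Worked example (illustrative, not part of the library) : for a 100 KB
 * input with no dictionary, tSize = 102400, so
 * srcLog = ZSTD_highbit32(102399) + 1 = 16 + 1 = 17, and any requested
 * windowLog above 17 is shrunk to 17 : a 128 KB window already covers the
 * entire input, so larger tables would only waste memory.
 */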
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);

static void ZSTD_overrideCParams(
              ZSTD_compressionParameters* cParams,
        const ZSTD_compressionParameters* overrides)
{
    if (overrides->windowLog)    cParams->windowLog    = overrides->windowLog;
    if (overrides->hashLog)      cParams->hashLog      = overrides->hashLog;
    if (overrides->chainLog)     cParams->chainLog     = overrides->chainLog;
    if (overrides->searchLog)    cParams->searchLog    = overrides->searchLog;
    if (overrides->minMatch)     cParams->minMatch     = overrides->minMatch;
    if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
    if (overrides->strategy)     cParams->strategy     = overrides->strategy;
}

ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
    ZSTD_compressionParameters cParams;
    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
        srcSizeHint = CCtxParams->srcSizeHint;
    }
    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
    if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
    ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
    assert(!ZSTD_checkCParams(cParams));
    /* srcSizeHint == 0 means 0 */
    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
}

static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
                       const ZSTD_paramSwitch_e useRowMatchFinder,
                       const U32 enableDedicatedDictSearch,
                       const U32 forCCtx)
{
    /* chain table size should be 0 for fast or row-hash strategies */
    size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
                                ? ((size_t)1 << cParams->chainLog)
                                : 0;
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
     * surrounded by redzones in ASAN. */
    size_t const tableSpace = chainSize * sizeof(U32)
                            + hSize * sizeof(U32)
                            + h3Size * sizeof(U32);
    size_t const optPotentialSpace =
        ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
      + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
      + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
      + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
      + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
      + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
    size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
                                            ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
                                            : 0;
    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
                                ? optPotentialSpace
                                : 0;
    size_t const slackSpace = ZSTD_cwksp_slack_space_required();

    /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
    ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
    assert(useRowMatchFinder != ZSTD_ps_auto);

    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
                (U32)chainSize, (U32)hSize, (U32)h3Size);
    return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
}

static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
        const ZSTD_compressionParameters* cParams,
        const ldmParams_t* ldmParams,
        const int isStatic,
        const ZSTD_paramSwitch_e useRowMatchFinder,
        const size_t buffInSize,
        const size_t buffOutSize,
        const U64 pledgedSrcSize)
{
    size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
    U32    const divider = (cParams->minMatch==3) ? 3 : 4;
    size_t const maxNbSeq = blockSize / divider;
    size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
                            + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
                            + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
    size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
    size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
    size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);

    size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
    size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
    size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
        ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;

    size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
                             + ZSTD_cwksp_alloc_size(buffOutSize);

    size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;

    size_t const neededSpace =
        cctxSpace +
        entropySpace +
        blockStateSpace +
        ldmSpace +
        ldmSeqSpace +
        matchStateSize +
        tokenSpace +
        bufferSpace;

    DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
    return neededSpace;
}

size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    ZSTD_compressionParameters const cParams =
            ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
    ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
                                                                               &cParams);

    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
    /* estimateCCtxSize is for one-shot compression. So no buffers should
     * be needed. However, we still allocate two 0-sized buffers, which can
     * take space under ASAN. */
    return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
        &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
}

size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
    if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
        /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
        size_t noRowCCtxSize;
        size_t rowCCtxSize;
        initialParams.useRowMatchFinder = ZSTD_ps_disable;
        noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
        initialParams.useRowMatchFinder = ZSTD_ps_enable;
        rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
        return MAX(noRowCCtxSize, rowCCtxSize);
    } else {
        return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
    }
}

static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
{
    int tier = 0;
    size_t largestSize = 0;
    static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
    for (; tier < 4; ++tier) {
        /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
        ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
        largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
    }
    return largestSize;
}

size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
    int level;
    size_t memBudget = 0;
    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
        /* Ensure monotonically increasing memory usage as compression level increases */
        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
        if (newMB > memBudget) memBudget = newMB;
    }
    return memBudget;
}
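
/* Illustrative sketch (not part of the library) : pairing the estimate with
 * a static (allocation-free) context. ZSTD_initStaticCCtx() is public
 * (experimental) API; the workspace is an assumed caller-provided buffer.
 *
 *     size_t const wkspSize = ZSTD_estimateCCtxSize(19);
 *     void*  const wksp = malloc(wkspSize);
 *     ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
 *     // cctx == NULL if wkspSize turned out to be insufficient
 */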
size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
    {   ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
        size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
                ? ((size_t)1 << cParams.windowLog) + blockSize
                : 0;
        size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
                ? ZSTD_compressBound(blockSize) + 1
                : 0;
        ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);

        return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
            &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
            ZSTD_CONTENTSIZE_UNKNOWN);
    }
}

size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
    if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
        /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
        size_t noRowCCtxSize;
        size_t rowCCtxSize;
        initialParams.useRowMatchFinder = ZSTD_ps_disable;
        noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
        initialParams.useRowMatchFinder = ZSTD_ps_enable;
        rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
        return MAX(noRowCCtxSize, rowCCtxSize);
    } else {
        return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
    }
}

static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
    return ZSTD_estimateCStreamSize_usingCParams(cParams);
}

size_t ZSTD_estimateCStreamSize(int compressionLevel)
{
    int level;
    size_t memBudget = 0;
    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
        if (newMB > memBudget) memBudget = newMB;
    }
    return memBudget;
}

/* ZSTD_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads (non-blocking mode).
 */
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_getFrameProgression(cctx->mtctx);
    }
#endif
    {   ZSTD_frameProgression fp;
        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
                                cctx->inBuffPos - cctx->inToCompress;
        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
        fp.ingested = cctx->consumedSrcSize + buffered;
        fp.consumed = cctx->consumedSrcSize;
        fp.produced = cctx->producedCSize;
        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
        fp.currentJobID = 0;
        fp.nbActiveWorkers = 0;
        return fp;
}   }
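
/* Illustrative sketch (not part of the library) : polling progression from a
 * monitoring thread while a non-blocking (nbWorkers >= 1) compression runs.
 *
 *     ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
 *     printf("ingested=%llu produced=%llu activeWorkers=%u\n",
 *            (unsigned long long)fp.ingested,
 *            (unsigned long long)fp.produced,
 *            fp.nbActiveWorkers);
 */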
/*! ZSTD_toFlushNow()
 *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
 */
size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_toFlushNow(cctx->mtctx);
    }
#endif
    (void)cctx;
    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
}

static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
                                    ZSTD_compressionParameters cParams2)
{
    (void)cParams1;
    (void)cParams2;
    assert(cParams1.windowLog    == cParams2.windowLog);
    assert(cParams1.chainLog     == cParams2.chainLog);
    assert(cParams1.hashLog      == cParams2.hashLog);
    assert(cParams1.searchLog    == cParams2.searchLog);
    assert(cParams1.minMatch     == cParams2.minMatch);
    assert(cParams1.targetLength == cParams2.targetLength);
    assert(cParams1.strategy     == cParams2.strategy);
}

void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
{
    int i;
    for (i = 0; i < ZSTD_REP_NUM; ++i)
        bs->rep[i] = repStartValue[i];
    bs->entropy.huf.repeatMode = HUF_repeat_none;
    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
}

/*! ZSTD_invalidateMatchState()
 *  Invalidate all the matches in the match finder tables.
 *  Requires nextSrc and base to be set (can be NULL).
 */
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
    ZSTD_window_clear(&ms->window);

    ms->nextToUpdate = ms->window.dictLimit;
    ms->loadedDictEnd = 0;
    ms->opt.litLengthSum = 0;   /* force reset of btopt stats */
    ms->dictMatchState = NULL;
}

/**
 * Controls, for this matchState reset, whether the tables need to be cleared /
 * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
 * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
 * subsequent operation will overwrite the table space anyways (e.g., copying
 * the matchState contents in from a CDict).
 */
typedef enum {
    ZSTDcrp_makeClean,
    ZSTDcrp_leaveDirty
} ZSTD_compResetPolicy_e;

/**
 * Controls, for this matchState reset, whether indexing can continue where it
 * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
 * (ZSTDirp_reset).
 */
typedef enum {
    ZSTDirp_continue,
    ZSTDirp_reset
} ZSTD_indexResetPolicy_e;

typedef enum {
    ZSTD_resetTarget_CDict,
    ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;

static size_t
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
                      ZSTD_cwksp* ws,
                const ZSTD_compressionParameters* cParams,
                const ZSTD_paramSwitch_e useRowMatchFinder,
                const ZSTD_compResetPolicy_e crp,
                const ZSTD_indexResetPolicy_e forceResetIndex,
                const ZSTD_resetTarget_e forWho)
{
    /* disable chain table allocation for fast or row-based strategies */
    size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
                                                     ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
                                ? ((size_t)1 << cParams->chainLog)
                                : 0;
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;

    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
    assert(useRowMatchFinder != ZSTD_ps_auto);
    if (forceResetIndex == ZSTDirp_reset) {
        ZSTD_window_init(&ms->window);
        ZSTD_cwksp_mark_tables_dirty(ws);
    }

    ms->hashLog3 = hashLog3;

    ZSTD_invalidateMatchState(ms);

    assert(!ZSTD_cwksp_reserve_failed(ws));   /* check that allocation hasn't already failed */

    ZSTD_cwksp_clear_tables(ws);

    DEBUGLOG(5, "reserving table space");
    /* table Space */
    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
                    "failed a workspace allocation in ZSTD_reset_matchState");

    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
    if (crp!=ZSTDcrp_leaveDirty) {
        /* reset tables only */
        ZSTD_cwksp_clean_tables(ws);
    }

    /* opt parser space */
    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
        DEBUGLOG(4, "reserving optimal parser space");
        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
    }

    if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
        {   /* Row match finder needs an additional table of hashes ("tags") */
            size_t const tagTableSize = hSize*sizeof(U16);
            ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
            if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
        }
        {   /* Switch to 32-entry rows if searchLog is 5 (or more) */
            U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
            assert(cParams->hashLog >= rowLog);
            ms->rowHashLog = cParams->hashLog - rowLog;
        }
    }

    ms->cParams = *cParams;

    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
                    "failed a workspace allocation in ZSTD_reset_matchState");
    return 0;
}

/* ZSTD_indexTooCloseToMax() :
 * minor optimization : prefer memset() rather than reduceIndex()
 * which is measurably slow in some circumstances (reported for Visual Studio).
 * Works when re-using a context for a lot of smallish inputs :
 * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
 * memset() will be triggered before reduceIndex().
 */
#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
{
    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
}

/** ZSTD_dictTooBig():
 * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in
 * one go generically. So we ensure that in that case we reset the tables to zero,
 * so that we can load as much of the dictionary as possible.
 */
static int ZSTD_dictTooBig(size_t const loadedDictSize)
{
    return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
}

/*! ZSTD_resetCCtx_internal() :
 * @param loadedDictSize The size of the dictionary to be loaded
 * into the context, if any. If no dictionary is used, or the
 * dictionary is being attached / copied, then pass 0.
 * note : `params` are assumed fully validated at this stage.
 */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
                                      ZSTD_CCtx_params const* params,
                                      U64 const pledgedSrcSize,
                                      size_t const loadedDictSize,
                                      ZSTD_compResetPolicy_e const crp,
                                      ZSTD_buffered_policy_e const zbuff)
{
    ZSTD_cwksp* const ws = &zc->workspace;
    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
                (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));

    zc->isFirstBlock = 1;

    /* Set applied params early so we can modify them for LDM,
     * and point params at the applied params.
     */
    zc->appliedParams = *params;
    params = &zc->appliedParams;

    assert(params->useRowMatchFinder != ZSTD_ps_auto);
    assert(params->useBlockSplitter != ZSTD_ps_auto);
    assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
        /* Adjust long distance matching parameters */
        ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
        assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
        assert(params->ldmParams.hashRateLog < 32);
    }

    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
        U32    const divider = (params->cParams.minMatch==3) ? 3 : 4;
        size_t const maxNbSeq = blockSize / divider;
        size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
                ? ZSTD_compressBound(blockSize) + 1
                : 0;
        size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered)
                ? windowSize + blockSize
                : 0;
        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);

        int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
        int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
        ZSTD_indexResetPolicy_e needsIndexReset =
            (indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;

        size_t const neededSpace =
            ZSTD_estimateCCtxSize_usingCCtxParams_internal(
                &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
                buffInSize, buffOutSize, pledgedSrcSize);
        int resizeWorkspace;

        FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");

        if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);

        {   /* Check if workspace is large enough, alloc a new one if needed */
            int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
            int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
            resizeWorkspace = workspaceTooSmall || workspaceWasteful;
            DEBUGLOG(4, "Need %zu B workspace", neededSpace);
            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);

            if (resizeWorkspace) {
                DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
                            ZSTD_cwksp_sizeof(ws) >> 10,
                            neededSpace >> 10);

                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");

                needsIndexReset = ZSTDirp_reset;

                ZSTD_cwksp_free(ws, zc->customMem);
                FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");

                DEBUGLOG(5, "reserving object space");
                /* Statically sized space.
                 * entropyWorkspace never moves,
                 * though prev/next block swap places */
                assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
                RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
                zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
                zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
                RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
        }   }

        ZSTD_cwksp_clear(ws);

        /* init params */
        zc->blockState.matchState.cParams = params->cParams;
        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
        zc->consumedSrcSize = 0;
        zc->producedCSize = 0;
        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            zc->appliedParams.fParams.contentSizeFlag = 0;
        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
        zc->blockSize = blockSize;

        XXH64_reset(&zc->xxhState, 0);
        zc->stage = ZSTDcs_init;
        zc->dictID = 0;
        zc->dictContentSize = 0;

        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);

        /* ZSTD_wildcopy() is used to copy into the literals buffer,
         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
         */
        zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
        zc->seqStore.maxNbLit = blockSize;

        /* buffers */
        zc->bufferedPolicy = zbuff;
        zc->inBuffSize = buffInSize;
        zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
        zc->outBuffSize = buffOutSize;
        zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);

        /* ldm bucketOffsets table */
        if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
            /* TODO: avoid memset? */
            size_t const numBuckets =
                  ((size_t)1) << (params->ldmParams.hashLog -
                                  params->ldmParams.bucketSizeLog);
            zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
            ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
        }

        /* sequences storage */
        ZSTD_referenceExternalSequences(zc, NULL, 0);
        zc->seqStore.maxNbSeq = maxNbSeq;
        zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
        zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
        zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));

        FORWARD_IF_ERROR(ZSTD_reset_matchState(
            &zc->blockState.matchState,
            ws,
            &params->cParams,
            params->useRowMatchFinder,
            crp,
            needsIndexReset,
            ZSTD_resetTarget_CCtx), "");

        /* ldm hash table */
        if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
            /* TODO: avoid memset? */
            size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
            zc->maxNbLdmSequences = maxNbLdmSeq;

            ZSTD_window_init(&zc->ldmState.window);
            zc->ldmState.loadedDictEnd = 0;
        }

        DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
        assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));

        zc->initialized = 1;

        return 0;
    }
}

/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
    int i;
    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
}

/* These are the approximate sizes for each strategy past which copying the
 * dictionary tables into the working context is faster than using them
 * in-place.
 */
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
    8 KB,  /* unused */
    8 KB,  /* ZSTD_fast */
    16 KB, /* ZSTD_dfast */
    32 KB, /* ZSTD_greedy */
    32 KB, /* ZSTD_lazy */
    32 KB, /* ZSTD_lazy2 */
    32 KB, /* ZSTD_btlazy2 */
    32 KB, /* ZSTD_btopt */
    8 KB,  /* ZSTD_btultra */
    8 KB   /* ZSTD_btultra2 */
};

static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
                                 const ZSTD_CCtx_params* params,
                                 U64 pledgedSrcSize)
{
    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
    int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
    return dedicatedDictSearch
        || ( ( pledgedSrcSize <= cutoff
            || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
            || params->attachDictPref == ZSTD_dictForceAttach )
          && params->attachDictPref != ZSTD_dictForceCopy
          && !params->forceWindow ); /* dictMatchState isn't correctly
                                      * handled in _enforceMaxDist */
}
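
/* Illustrative sketch (not part of the library) : the attach/copy heuristic
 * above can be overridden through the public parameter it consults.
 *
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach);
 *     // or ZSTD_dictForceCopy to always pay the table-copy cost up front
 */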
  1813. static size_t
  1814. ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
  1815. const ZSTD_CDict* cdict,
  1816. ZSTD_CCtx_params params,
  1817. U64 pledgedSrcSize,
  1818. ZSTD_buffered_policy_e zbuff)
  1819. {
  1820. DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu",
  1821. (unsigned long long)pledgedSrcSize);
  1822. {
  1823. ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
  1824. unsigned const windowLog = params.cParams.windowLog;
  1825. assert(windowLog != 0);
  1826. /* Resize working context table params for input only, since the dict
  1827. * has its own tables. */
  1828. /* pledgedSrcSize == 0 means 0! */
  1829. if (cdict->matchState.dedicatedDictSearch) {
  1830. ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
  1831. }
  1832. params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
  1833. cdict->dictContentSize, ZSTD_cpm_attachDict);
  1834. params.cParams.windowLog = windowLog;
  1835. params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
  1836. FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
  1837. /* loadedDictSize */ 0,
  1838. ZSTDcrp_makeClean, zbuff), "");
  1839. assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
  1840. }
  1841. { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
  1842. - cdict->matchState.window.base);
  1843. const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
  1844. if (cdictLen == 0) {
  1845. /* don't even attach dictionaries with no contents */
  1846. DEBUGLOG(4, "skipping attaching empty dictionary");
  1847. } else {
  1848. DEBUGLOG(4, "attaching dictionary into context");
  1849. cctx->blockState.matchState.dictMatchState = &cdict->matchState;
  1850. /* prep working match state so dict matches never have negative indices
  1851. * when they are translated to the working context's index space. */
  1852. if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
  1853. cctx->blockState.matchState.window.nextSrc =
  1854. cctx->blockState.matchState.window.base + cdictEnd;
  1855. ZSTD_window_clear(&cctx->blockState.matchState.window);
  1856. }
  1857. /* loadedDictEnd is expressed within the referential of the active context */
  1858. cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
  1859. } }
  1860. cctx->dictID = cdict->dictID;
  1861. cctx->dictContentSize = cdict->dictContentSize;
  1862. /* copy block state */
  1863. ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
  1864. return 0;
  1865. }
  1866. static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
  1867. const ZSTD_CDict* cdict,
  1868. ZSTD_CCtx_params params,
  1869. U64 pledgedSrcSize,
  1870. ZSTD_buffered_policy_e zbuff)
  1871. {
  1872. const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
  1873. assert(!cdict->matchState.dedicatedDictSearch);
  1874. DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu",
  1875. (unsigned long long)pledgedSrcSize);
  1876. { unsigned const windowLog = params.cParams.windowLog;
  1877. assert(windowLog != 0);
  1878. /* Copy only compression parameters related to tables. */
  1879. params.cParams = *cdict_cParams;
  1880. params.cParams.windowLog = windowLog;
  1881. params.useRowMatchFinder = cdict->useRowMatchFinder;
  1882. FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
  1883. /* loadedDictSize */ 0,
  1884. ZSTDcrp_leaveDirty, zbuff), "");
  1885. assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
  1886. assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
  1887. assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
  1888. }
  1889. ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
  1890. assert(params.useRowMatchFinder != ZSTD_ps_auto);
  1891. /* copy tables */
  1892. { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
  1893. ? ((size_t)1 << cdict_cParams->chainLog)
  1894. : 0;
  1895. size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
  1896. ZSTD_memcpy(cctx->blockState.matchState.hashTable,
  1897. cdict->matchState.hashTable,
  1898. hSize * sizeof(U32));
  1899. /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
  1900. if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
  1901. ZSTD_memcpy(cctx->blockState.matchState.chainTable,
  1902. cdict->matchState.chainTable,
  1903. chainSize * sizeof(U32));
  1904. }
  1905. /* copy tag table */
  1906. if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
  1907. size_t const tagTableSize = hSize*sizeof(U16);
  1908. ZSTD_memcpy(cctx->blockState.matchState.tagTable,
  1909. cdict->matchState.tagTable,
  1910. tagTableSize);
  1911. }
  1912. }
  1913. /* Zero the hashTable3, since the cdict never fills it */
  1914. { int const h3log = cctx->blockState.matchState.hashLog3;
  1915. size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
  1916. assert(cdict->matchState.hashLog3 == 0);
  1917. ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
  1918. }
  1919. ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
  1920. /* copy dictionary offsets */
  1921. { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
  1922. ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
  1923. dstMatchState->window = srcMatchState->window;
  1924. dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
  1925. dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
  1926. }
  1927. cctx->dictID = cdict->dictID;
  1928. cctx->dictContentSize = cdict->dictContentSize;
  1929. /* copy block state */
  1930. ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
  1931. return 0;
  1932. }
  1933. /* We have a choice between copying the dictionary context into the working
  1934. * context, or referencing the dictionary context from the working context
  1935. * in-place. We decide here which strategy to use. */
  1936. static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
  1937. const ZSTD_CDict* cdict,
  1938. const ZSTD_CCtx_params* params,
  1939. U64 pledgedSrcSize,
  1940. ZSTD_buffered_policy_e zbuff)
  1941. {
  1942. DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
  1943. (unsigned)pledgedSrcSize);
  1944. if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
  1945. return ZSTD_resetCCtx_byAttachingCDict(
  1946. cctx, cdict, *params, pledgedSrcSize, zbuff);
  1947. } else {
  1948. return ZSTD_resetCCtx_byCopyingCDict(
  1949. cctx, cdict, *params, pledgedSrcSize, zbuff);
  1950. }
  1951. }

/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
 * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
                    "Can't copy a ctx that's not in init stage.");
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
        assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
        assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
        params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
        params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
        params.ldmParams = srcCCtx->appliedParams.ldmParams;
        params.fParams = fParams;
        ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
                                /* loadedDictSize */ 0,
                                ZSTDcrp_leaveDirty, zbuff);
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);

    /* copy tables */
    {   size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
                                                         srcCCtx->appliedParams.useRowMatchFinder,
                                                         0 /* forDDSDict */)
                                    ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
                                    : 0;
        size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        int const h3log = srcCCtx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;

        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
                    srcCCtx->blockState.matchState.hashTable,
                    hSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
                    srcCCtx->blockState.matchState.chainTable,
                    chainSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
                    srcCCtx->blockState.matchState.hashTable3,
                    h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);

    /* copy dictionary offsets */
    {
        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;
    dstCCtx->dictContentSize = srcCCtx->dictContentSize;

    /* copy block state */
    ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}

/*! ZSTD_copyCCtx() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  pledgedSrcSize==0 means "unknown".
 * @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);

    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
                                  fParams, pledgedSrcSize,
                                  zbuff);
}
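
/* Illustrative sketch (not part of upstream zstd) : ZSTD_copyCCtx() lets a
 * caller pay the dictionary-loading cost once, then clone the primed context
 * into fresh ones. Uses the static-linking-only begin/end API. */
#if 0
static size_t cloneAndCompress_example(const void* dict, size_t dictSize, int level,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
{
    ZSTD_CCtx* const primed = ZSTD_createCCtx();
    ZSTD_CCtx* const clone  = ZSTD_createCCtx();
    size_t cSize;
    /* Load the dictionary once; `primed` stays in ZSTDcs_init stage. */
    ZSTD_compressBegin_usingDict(primed, dict, dictSize, level);
    /* Duplicate tables and entropy state; 0 means "source size unknown". */
    ZSTD_copyCCtx(clone, primed, 0);
    /* `clone` now compresses as if it had loaded the dictionary itself. */
    cSize = ZSTD_compressEnd(clone, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(clone);
    ZSTD_freeCCtx(primed);
    return cSize;
}
#endif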

#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
 *  reduce table indexes by `reducerValue`, or squash to zero.
 *  `preserveMark` preserves the "unsorted mark" used by the btlazy2 strategy.
 *  It must be set to a constant 0/1 value, so the branch can be removed during inlining.
 *  Presume table size is a multiple of ZSTD_ROWSIZE
 *  to help auto-vectorization */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
{
    int const nbRows = (int)size / ZSTD_ROWSIZE;
    int cellNb = 0;
    int rowNb;
    /* Protect special index values < ZSTD_WINDOW_START_INDEX. */
    U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
    assert(size < (1U<<31));  /* can be casted to int */

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty.
     *
     * This function however is intended to operate on those dirty tables and
     * re-clean them. So when this function is used correctly, we can unpoison
     * the memory it operated on. This introduces a blind spot though, since
     * if we now try to operate on __actually__ poisoned memory, we will not
     * detect that. */
    __msan_unpoison(table, size * sizeof(U32));
#endif

    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
        int column;
        for (column=0; column<ZSTD_ROWSIZE; column++) {
            U32 newVal;
            if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
                /* This write is pointless, but is required(?) for the compiler
                 * to auto-vectorize the loop. */
                newVal = ZSTD_DUBT_UNSORTED_MARK;
            } else if (table[cellNb] < reducerThreshold) {
                newVal = 0;
            } else {
                newVal = table[cellNb] - reducerValue;
            }
            table[cellNb] = newVal;
            cellNb++;
    }   }
}

static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}

static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}

/*! ZSTD_reduceIndex() :
 *   rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
    }

    if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
        U32 const chainSize = (U32)1 << params->cParams.chainLog;
        if (params->cParams.strategy == ZSTD_btlazy2)
            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
        else
            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
    }

    if (ms->hashLog3) {
        U32 const h3Size = (U32)1 << ms->hashLog3;
        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
    }
}
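
/* Worked example (illustrative numbers) : with reducerValue = 1000, any index
 * below 1000 + ZSTD_WINDOW_START_INDEX would point before the rescaled window,
 * so it is squashed to 0 ("no entry"), while a live index such as 1500 becomes
 * 500. For btlazy2 only, cells equal to ZSTD_DUBT_UNSORTED_MARK are kept
 * verbatim so the binary tree still knows which candidates remain unsorted. */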

/*-*******************************************************
*  Block entropic compression
*********************************************************/

/* See doc/zstd_compression_format.md for detailed format description */

void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
{
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    BYTE* const llCodeTable = seqStorePtr->llCode;
    BYTE* const ofCodeTable = seqStorePtr->ofCode;
    BYTE* const mlCodeTable = seqStorePtr->mlCode;
    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    U32 u;
    assert(nbSeq <= seqStorePtr->maxNbSeq);
    for (u=0; u<nbSeq; u++) {
        U32 const llv = sequences[u].litLength;
        U32 const mlv = sequences[u].mlBase;
        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
    }
    if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
    if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
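
/* Worked example (values follow the code tables in
 * doc/zstd_compression_format.md) : a sequence with litLength=18, mlBase=2 and
 * offBase=1024 yields llCode=17 (litLengths 18-19 share one code with 1 extra
 * bit), mlCode=2 (short lengths map one-to-one), and ofCode=10, since the
 * offset code is simply the position of the highest set bit of offBase. */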

/* ZSTD_useTargetCBlockSize():
 * Returns whether the target compressed block size param is being used.
 * If used, compression will make a best effort to keep compressed block sizes around targetCBlockSize.
 * Returns 1 if true, 0 otherwise. */
static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
{
    DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
    return (cctxParams->targetCBlockSize != 0);
}

/* ZSTD_blockSplitterEnabled():
 * Returns whether the block splitting param is being used.
 * If used, compression will make a best effort to split blocks in order to improve the compression ratio.
 * At the time this function is called, the parameter must be finalized.
 * Returns 1 if true, 0 otherwise. */
static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
{
    DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter);
    assert(cctxParams->useBlockSplitter != ZSTD_ps_auto);
    return (cctxParams->useBlockSplitter == ZSTD_ps_enable);
}

/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
 * and size of the sequences statistics
 */
typedef struct {
    U32 LLtype;
    U32 Offtype;
    U32 MLtype;
    size_t size;
    size_t lastCountSize;    /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
} ZSTD_symbolEncodingTypeStats_t;

/* ZSTD_buildSequencesStatistics():
 * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
 * Modifies `nextEntropy` to have the appropriate values as a side effect.
 * nbSeq must be greater than 0.
 *
 * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
 */
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
                        const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
                              BYTE* dst, const BYTE* const dstEnd,
                              ZSTD_strategy strategy, unsigned* countWorkspace,
                              void* entropyWorkspace, size_t entropyWkspSize) {
    BYTE* const ostart = dst;
    const BYTE* const oend = dstEnd;
    BYTE* op = ostart;
    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    ZSTD_symbolEncodingTypeStats_t stats;

    stats.lastCountSize = 0;
    /* convert length/distances into codes */
    ZSTD_seqToCodes(seqStorePtr);
    assert(op <= oend);
    assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
    /* build CTable for Literal Lengths */
    {   unsigned max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
        stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
                                        countWorkspace, max, mostFrequent, nbSeq,
                                        LLFSELog, prevEntropy->litlengthCTable,
                                        LL_defaultNorm, LL_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
                countWorkspace, max, llCodeTable, nbSeq,
                LL_defaultNorm, LL_defaultNormLog, MaxLL,
                prevEntropy->litlengthCTable,
                sizeof(prevEntropy->litlengthCTable),
                entropyWorkspace, entropyWkspSize);
            if (ZSTD_isError(countSize)) {
                DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
                stats.size = countSize;
                return stats;
            }
            if (stats.LLtype == set_compressed)
                stats.lastCountSize = countSize;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for Offsets */
    {   unsigned max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(
            countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
        stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
                                        countWorkspace, max, mostFrequent, nbSeq,
                                        OffFSELog, prevEntropy->offcodeCTable,
                                        OF_defaultNorm, OF_defaultNormLog,
                                        defaultPolicy, strategy);
        assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
                countWorkspace, max, ofCodeTable, nbSeq,
                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                prevEntropy->offcodeCTable,
                sizeof(prevEntropy->offcodeCTable),
                entropyWorkspace, entropyWkspSize);
            if (ZSTD_isError(countSize)) {
                DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
                stats.size = countSize;
                return stats;
            }
            if (stats.Offtype == set_compressed)
                stats.lastCountSize = countSize;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for MatchLengths */
    {   unsigned max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(
            countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
        stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
                                        countWorkspace, max, mostFrequent, nbSeq,
                                        MLFSELog, prevEntropy->matchlengthCTable,
                                        ML_defaultNorm, ML_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
                countWorkspace, max, mlCodeTable, nbSeq,
                ML_defaultNorm, ML_defaultNormLog, MaxML,
                prevEntropy->matchlengthCTable,
                sizeof(prevEntropy->matchlengthCTable),
                entropyWorkspace, entropyWkspSize);
            if (ZSTD_isError(countSize)) {
                DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
                stats.size = countSize;
                return stats;
            }
            if (stats.MLtype == set_compressed)
                stats.lastCountSize = countSize;
            op += countSize;
            assert(op <= oend);
    }   }
    stats.size = (size_t)(op-ostart);
    return stats;
}

/* ZSTD_entropyCompressSeqStore_internal():
 * compresses both literals and sequences
 * Returns compressed size of block, or a zstd error.
 */
#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
MEM_STATIC size_t
ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
                          const ZSTD_entropyCTables_t* prevEntropy,
                                ZSTD_entropyCTables_t* nextEntropy,
                          const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                void* entropyWorkspace, size_t entropyWkspSize,
                          const int bmi2)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    unsigned* count = (unsigned*)entropyWorkspace;
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t lastCountSize;

    entropyWorkspace = count + (MaxSeq + 1);
    entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
    DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart;
        size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart;
        /* Base suspicion of uncompressibility on ratio of literals to sequences */
        unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
                                    cctxParams->cParams.strategy,
                                    ZSTD_literalsCompressionIsDisabled(cctxParams),
                                    op, dstCapacity,
                                    literals, litSize,
                                    entropyWorkspace, entropyWkspSize,
                                    bmi2, suspectUncompressible);
        FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
        assert(cSize <= dstCapacity);
        op += cSize;
    }

    /* Sequences Header */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "Can't fit seq hdr in output buf!");
    if (nbSeq < 128) {
        *op++ = (BYTE)nbSeq;
    } else if (nbSeq < LONGNBSEQ) {
        op[0] = (BYTE)((nbSeq>>8) + 0x80);
        op[1] = (BYTE)nbSeq;
        op+=2;
    } else {
        op[0]=0xFF;
        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
        op+=3;
    }
    assert(op <= oend);
    if (nbSeq==0) {
        /* Copy the old tables over as if we repeated them */
        ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
        return (size_t)(op - ostart);
    }
    {
        ZSTD_symbolEncodingTypeStats_t stats;
        BYTE* seqHead = op++;
        /* build stats for sequences */
        stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
                                             &prevEntropy->fse, &nextEntropy->fse,
                                              op, oend,
                                              strategy, count,
                                              entropyWorkspace, entropyWkspSize);
        FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
        *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
        lastCountSize = stats.lastCountSize;
        op += stats.size;
    }

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, (size_t)(oend - op),
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
        op += bitstreamSize;
        assert(op <= oend);
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
        if (lastCountSize && (lastCountSize + bitstreamSize) < 4) {
            /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(lastCountSize + bitstreamSize == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
    }

    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
    return (size_t)(op - ostart);
}
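
/* Worked example of the Sequences section header written above : nbSeq=100
 * fits in one byte (0x64). nbSeq=300 takes the two-byte form : op[0] =
 * (300>>8)+0x80 = 0x81, op[1] = 300&0xFF = 0x2C, decoded back as
 * ((0x81-0x80)<<8)+0x2C = 300. nbSeq >= LONGNBSEQ takes 0xFF plus a LE16 of
 * (nbSeq-LONGNBSEQ). The seqHead byte then packs the three table modes as
 * (LLtype<<6) + (Offtype<<4) + (MLtype<<2), the low 2 bits being reserved. */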

MEM_STATIC size_t
ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
                       const ZSTD_entropyCTables_t* prevEntropy,
                             ZSTD_entropyCTables_t* nextEntropy,
                       const ZSTD_CCtx_params* cctxParams,
                             void* dst, size_t dstCapacity,
                             size_t srcSize,
                             void* entropyWorkspace, size_t entropyWkspSize,
                             int bmi2)
{
    size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,
                            dst, dstCapacity,
                            entropyWorkspace, entropyWkspSize, bmi2);
    if (cSize == 0) return 0;
    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
     * Since we ran out of space anyway, the block must not be compressible, so fall back to a raw uncompressed block.
     */
    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
        return 0;  /* block not compressed */
    FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");

    /* Check compressibility */
    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
        if (cSize >= maxCSize) return 0;  /* block not compressed */
    }
    DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
    return cSize;
}
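
/* Worked example (illustrative, assuming ZSTD_minGain() is roughly
 * srcSize/64 + 2 for the faster strategies) : a 64 KB block is kept in
 * entropy-coded form only if it saves more than about 1 KB versus storing
 * the bytes raw. Below that margin, returning 0 makes the caller emit an
 * uncompressed block, which is also cheaper to decode. */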

/* ZSTD_selectBlockCompressor() :
 * Not static, but internal use only (used by long distance matcher)
 * assumption : strat is a valid strategy */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
{
    static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
        { ZSTD_compressBlock_fast  /* default for 0 */,
          ZSTD_compressBlock_fast,
          ZSTD_compressBlock_doubleFast,
          ZSTD_compressBlock_greedy,
          ZSTD_compressBlock_lazy,
          ZSTD_compressBlock_lazy2,
          ZSTD_compressBlock_btlazy2,
          ZSTD_compressBlock_btopt,
          ZSTD_compressBlock_btultra,
          ZSTD_compressBlock_btultra2 },
        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
          ZSTD_compressBlock_fast_extDict,
          ZSTD_compressBlock_doubleFast_extDict,
          ZSTD_compressBlock_greedy_extDict,
          ZSTD_compressBlock_lazy_extDict,
          ZSTD_compressBlock_lazy2_extDict,
          ZSTD_compressBlock_btlazy2_extDict,
          ZSTD_compressBlock_btopt_extDict,
          ZSTD_compressBlock_btultra_extDict,
          ZSTD_compressBlock_btultra_extDict },
        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
          ZSTD_compressBlock_fast_dictMatchState,
          ZSTD_compressBlock_doubleFast_dictMatchState,
          ZSTD_compressBlock_greedy_dictMatchState,
          ZSTD_compressBlock_lazy_dictMatchState,
          ZSTD_compressBlock_lazy2_dictMatchState,
          ZSTD_compressBlock_btlazy2_dictMatchState,
          ZSTD_compressBlock_btopt_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState },
        { NULL  /* default for 0 */,
          NULL,
          NULL,
          ZSTD_compressBlock_greedy_dedicatedDictSearch,
          ZSTD_compressBlock_lazy_dedicatedDictSearch,
          ZSTD_compressBlock_lazy2_dedicatedDictSearch,
          NULL,
          NULL,
          NULL,
          NULL }
    };
    ZSTD_blockCompressor selectedCompressor;
    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);

    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
    DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
    if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
        static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
            { ZSTD_compressBlock_greedy_row,
              ZSTD_compressBlock_lazy_row,
              ZSTD_compressBlock_lazy2_row },
            { ZSTD_compressBlock_greedy_extDict_row,
              ZSTD_compressBlock_lazy_extDict_row,
              ZSTD_compressBlock_lazy2_extDict_row },
            { ZSTD_compressBlock_greedy_dictMatchState_row,
              ZSTD_compressBlock_lazy_dictMatchState_row,
              ZSTD_compressBlock_lazy2_dictMatchState_row },
            { ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
              ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
              ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
        };
        DEBUGLOG(4, "Selecting a row-based matchfinder");
        assert(useRowMatchFinder != ZSTD_ps_auto);
        selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
    } else {
        selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
    }
    assert(selectedCompressor != NULL);
    return selectedCompressor;
}
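
/* Selection walk-through (derived from the tables above) : with
 * dictMode == ZSTD_extDict and strat == ZSTD_lazy2, the row matchfinder picks
 * rowBasedBlockCompressors[1][ZSTD_lazy2 - ZSTD_greedy] ==
 * ZSTD_compressBlock_lazy2_extDict_row, while the classic path resolves to
 * blockCompressor[1][ZSTD_lazy2] == ZSTD_compressBlock_lazy2_extDict. */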

static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
                                   const BYTE* anchor, size_t lastLLSize)
{
    ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
    seqStorePtr->lit += lastLLSize;
}

void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
    ssPtr->lit = ssPtr->litStart;
    ssPtr->sequences = ssPtr->sequencesStart;
    ssPtr->longLengthType = ZSTD_llt_none;
}

typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;

static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    /* Assert that we have correctly flushed the ctx params into the ms's copy */
    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
        if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
            ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
        } else {
            ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
        }
        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
    }
    ZSTD_resetSeqStore(&(zc->seqStore));
    /* required for optimal parser to read stats from dictionary */
    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
    /* tell the optimal parser how we expect to compress literals */
    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
    /* a gap between an attached dict and the current window is not safe,
     * they must remain adjacent,
     * and when that stops being the case, the dict must be unset */
    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);

    /* limited update after a very long match */
    {   const BYTE* const base = ms->window.base;
        const BYTE* const istart = (const BYTE*)src;
        const U32 curr = (U32)(istart-base);
        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
        if (curr > ms->nextToUpdate + 384)
            ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
    }

    /* select and store sequences */
    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
        size_t lastLLSize;
        {   int i;
            for (i = 0; i < ZSTD_REP_NUM; ++i)
                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
        }
        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
            assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       zc->appliedParams.useRowMatchFinder,
                                       src, srcSize);
            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
        } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
            rawSeqStore_t ldmSeqStore = kNullRawSeqStore;

            ldmSeqStore.seq = zc->ldmSequences;
            ldmSeqStore.capacity = zc->maxNbLdmSequences;
            /* Updates ldmSeqStore.size */
            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                                        &zc->appliedParams.ldmParams,
                                                        src, srcSize), "");
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&ldmSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       zc->appliedParams.useRowMatchFinder,
                                       src, srcSize);
            assert(ldmSeqStore.pos == ldmSeqStore.size);
        } else {   /* not long range mode */
            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
                                                                                    zc->appliedParams.useRowMatchFinder,
                                                                                    dictMode);
            ms->ldmSeqStore = NULL;
            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
        }
        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
    }   }
    return ZSTDbss_compress;
}

static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
{
    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
    const seqDef* seqStoreSeqs = seqStore->sequencesStart;
    size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
    size_t literalsRead = 0;
    size_t lastLLSize;

    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
    size_t i;
    repcodes_t updatedRepcodes;

    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
    /* Ensure we have enough space for last literals "sequence" */
    assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
    ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
    for (i = 0; i < seqStoreSeqSize; ++i) {
        U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
        outSeqs[i].litLength = seqStoreSeqs[i].litLength;
        outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
        outSeqs[i].rep = 0;

        if (i == seqStore->longLengthPos) {
            if (seqStore->longLengthType == ZSTD_llt_literalLength) {
                outSeqs[i].litLength += 0x10000;
            } else if (seqStore->longLengthType == ZSTD_llt_matchLength) {
                outSeqs[i].matchLength += 0x10000;
            }
        }

        if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
            /* Derive the correct offset corresponding to a repcode */
            outSeqs[i].rep = seqStoreSeqs[i].offBase;
            if (outSeqs[i].litLength != 0) {
                rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
            } else {
                if (outSeqs[i].rep == 3) {
                    rawOffset = updatedRepcodes.rep[0] - 1;
                } else {
                    rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
                }
            }
        }
        outSeqs[i].offset = rawOffset;
        /* seqStoreSeqs[i].offBase == offCode+1, and ZSTD_updateRep() expects offCode,
         * so we provide seqStoreSeqs[i].offBase - 1 */
        ZSTD_updateRep(updatedRepcodes.rep,
                       seqStoreSeqs[i].offBase - 1,
                       seqStoreSeqs[i].litLength == 0);
        literalsRead += outSeqs[i].litLength;
    }
    /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
     * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
     * for the block boundary, according to the API.
     */
    assert(seqStoreLiteralsSize >= literalsRead);
    lastLLSize = seqStoreLiteralsSize - literalsRead;
    outSeqs[i].litLength = (U32)lastLLSize;
    outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
    seqStoreSeqSize++;
    zc->seqCollector.seqIndex += seqStoreSeqSize;
}

size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
                              size_t outSeqsSize, const void* src, size_t srcSize)
{
    const size_t dstCapacity = ZSTD_compressBound(srcSize);
    void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
    SeqCollector seqCollector;

    RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");

    seqCollector.collectSequences = 1;
    seqCollector.seqStart = outSeqs;
    seqCollector.seqIndex = 0;
    seqCollector.maxSequences = outSeqsSize;
    zc->seqCollector = seqCollector;

    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
    ZSTD_customFree(dst, ZSTD_defaultCMem);
    return zc->seqCollector.seqIndex;
}

size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
    size_t in = 0;
    size_t out = 0;
    for (; in < seqsSize; ++in) {
        if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
            if (in != seqsSize - 1) {
                sequences[in+1].litLength += sequences[in].litLength;
            }
        } else {
            sequences[out] = sequences[in];
            ++out;
        }
    }
    return out;
}
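
/* Illustrative sketch (not part of upstream zstd) : typical pairing of the
 * two public entry points above. Sizing the output array to one sequence per
 * source byte is a crude but sufficient upper bound. */
#if 0
static size_t collectSequences_example(const void* src, size_t srcSize,
                                       ZSTD_Sequence* seqs, size_t seqsCapacity)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const nbSeqs = ZSTD_generateSequences(cctx, seqs, seqsCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    if (ZSTD_isError(nbSeqs)) return nbSeqs;
    /* Each block ends with a (matchLength:0, offset:0) delimiter; merging
     * folds its litLength into the following sequence, leaving one flat list. */
    return ZSTD_mergeBlockDelimiters(seqs, nbSeqs);
}
#endif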

/* Unrolled loop to read four size_ts of input at a time.
 * Returns 1 if the input is RLE, 0 if not. */
static int ZSTD_isRLE(const BYTE* src, size_t length) {
    const BYTE* ip = src;
    const BYTE value = ip[0];
    const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
    const size_t unrollSize = sizeof(size_t) * 4;
    const size_t unrollMask = unrollSize - 1;
    const size_t prefixLength = length & unrollMask;
    size_t i;
    size_t u;
    if (length == 1) return 1;
    /* Check if prefix is RLE first before using unrolled loop */
    if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
        return 0;
    }
    for (i = prefixLength; i != length; i += unrollSize) {
        for (u = 0; u < unrollSize; u += sizeof(size_t)) {
            if (MEM_readST(ip + i + u) != valueST) {
                return 0;
    }   }   }
    return 1;
}
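
/* How the comparison above works (SWAR broadcast) : for value 0x7E,
 * valueST = 0x7E * 0x0101010101010101 = 0x7E7E7E7E7E7E7E7E on 64-bit targets,
 * so one MEM_readST() vets sizeof(size_t) bytes at a time and each loop
 * iteration covers unrollSize = 4*sizeof(size_t) bytes (32 on 64-bit). The
 * scalar ZSTD_count() check disposes of the length % unrollSize prefix first. */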

/* Returns true if the given block may be RLE.
 * This is just a heuristic based on the compressibility.
 * It may return both false positives and false negatives.
 */
static int ZSTD_maybeRLE(seqStore_t const* seqStore)
{
    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
    size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);

    return nbSeqs < 4 && nbLits < 10;
}

static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
{
    ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
    bs->prevCBlock = bs->nextCBlock;
    bs->nextCBlock = tmp;
}

/* Writes the block header */
static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) {
    U32 const cBlockHeader = cSize == 1 ?
                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
    MEM_writeLE24(op, cBlockHeader);
    DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
}
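
/* Header layout walk-through : the 24-bit value packs lastBlock in bit 0,
 * the block type in bits 1-2 (bt_rle==1, bt_compressed==2), and the size in
 * bits 3-23. E.g. a non-last compressed block with cSize=1000 gives
 * 0 + (2<<1) + (1000<<3) = 0x1F44, stored little-endian as 44 1F 00. The RLE
 * case stores blockSize (the regenerated size) instead, since an RLE payload
 * is always exactly 1 byte. */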

/** ZSTD_buildBlockEntropyStats_literals() :
 *  Builds entropy for the literals.
 *  Stores literals block type (raw, rle, compressed, repeat) and
 *  huffman description table to hufMetadata.
 *  Requires ENTROPY_WORKSPACE_SIZE workspace
 *  @return : size of huffman description table or error code */
static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
                                            const ZSTD_hufCTables_t* prevHuf,
                                                  ZSTD_hufCTables_t* nextHuf,
                                                  ZSTD_hufCTablesMetadata_t* hufMetadata,
                                            const int literalsCompressionIsDisabled,
                                                  void* workspace, size_t wkspSize)
{
    BYTE* const wkspStart = (BYTE*)workspace;
    BYTE* const wkspEnd = wkspStart + wkspSize;
    BYTE* const countWkspStart = wkspStart;
    unsigned* const countWksp = (unsigned*)workspace;
    const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
    BYTE* const nodeWksp = countWkspStart + countWkspSize;
    const size_t nodeWkspSize = wkspEnd - nodeWksp;
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    unsigned huffLog = HUF_TABLELOG_DEFAULT;
    HUF_repeat repeat = prevHuf->repeatMode;
    DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);

    /* Prepare nextEntropy assuming reusing the existing table */
    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (literalsCompressionIsDisabled) {
        DEBUGLOG(5, "set_basic - disabled");
        hufMetadata->hType = set_basic;
        return 0;
    }

    /* small ? don't even attempt compression (speed opt) */
#ifndef COMPRESS_LITERALS_SIZE_MIN
#define COMPRESS_LITERALS_SIZE_MIN 63
#endif
    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
        if (srcSize <= minLitSize) {
            DEBUGLOG(5, "set_basic - too small");
            hufMetadata->hType = set_basic;
            return 0;
        }
    }

    /* Scan input and build symbol stats */
    {   size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
        FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
        if (largest == srcSize) {
            DEBUGLOG(5, "set_rle");
            hufMetadata->hType = set_rle;
            return 0;
        }
        if (largest <= (srcSize >> 7)+4) {
            DEBUGLOG(5, "set_basic - no gain");
            hufMetadata->hType = set_basic;
            return 0;
        }
    }

    /* Validate the previous Huffman table */
    if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
        repeat = HUF_repeat_none;
    }

    /* Build Huffman Tree */
    ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
    {   size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
                                                    maxSymbolValue, huffLog,
                                                    nodeWksp, nodeWkspSize);
        FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
        huffLog = (U32)maxBits;
        {   /* Build and write the CTable */
            size_t const newCSize = HUF_estimateCompressedSize(
                    (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
            size_t const hSize = HUF_writeCTable_wksp(
                    hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
                    (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
                    nodeWksp, nodeWkspSize);
            /* Check against repeating the previous CTable */
            if (repeat != HUF_repeat_none) {
                size_t const oldCSize = HUF_estimateCompressedSize(
                        (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
                if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
                    DEBUGLOG(5, "set_repeat - smaller");
                    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
                    hufMetadata->hType = set_repeat;
                    return 0;
                }
            }
            if (newCSize + hSize >= srcSize) {
                DEBUGLOG(5, "set_basic - no gains");
                ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
                hufMetadata->hType = set_basic;
                return 0;
            }
            DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
            hufMetadata->hType = set_compressed;
            nextHuf->repeatMode = HUF_repeat_check;
            return hSize;
        }
    }
}

/* ZSTD_buildDummySequencesStatistics():
 * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic,
 * and updates nextEntropy to the appropriate repeatMode.
 */
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {
    ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0};
    nextEntropy->litlength_repeatMode = FSE_repeat_none;
    nextEntropy->offcode_repeatMode = FSE_repeat_none;
    nextEntropy->matchlength_repeatMode = FSE_repeat_none;
    return stats;
}

/** ZSTD_buildBlockEntropyStats_sequences() :
 *  Builds entropy for the sequences.
 *  Stores symbol compression modes and fse table to fseMetadata.
 *  Requires ENTROPY_WORKSPACE_SIZE wksp.
 *  @return : size of fse tables or error code */
static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
                                              const ZSTD_fseCTables_t* prevEntropy,
                                                    ZSTD_fseCTables_t* nextEntropy,
                                              const ZSTD_CCtx_params* cctxParams,
                                                    ZSTD_fseCTablesMetadata_t* fseMetadata,
                                                    void* workspace, size_t wkspSize)
{
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
    BYTE* const ostart = fseMetadata->fseTablesBuffer;
    BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
    BYTE* op = ostart;
    unsigned* countWorkspace = (unsigned*)workspace;
    unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1);
    size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace);
    ZSTD_symbolEncodingTypeStats_t stats;

    DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq);
    stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
                                          prevEntropy, nextEntropy, op, oend,
                                          strategy, countWorkspace,
                                          entropyWorkspace, entropyWorkspaceSize)
                       : ZSTD_buildDummySequencesStatistics(nextEntropy);
    FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
    fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
    fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
    fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
    fseMetadata->lastCountSize = stats.lastCountSize;
    return stats.size;
}

/** ZSTD_buildBlockEntropyStats() :
 *  Builds entropy for the block.
 *  Requires workspace size ENTROPY_WORKSPACE_SIZE
 *
 *  @return : 0 on success or error code
 */
size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
                             const ZSTD_entropyCTables_t* prevEntropy,
                                   ZSTD_entropyCTables_t* nextEntropy,
                             const ZSTD_CCtx_params* cctxParams,
                                   ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                   void* workspace, size_t wkspSize)
{
    size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
    entropyMetadata->hufMetadata.hufDesSize =
        ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
                                            &prevEntropy->huf, &nextEntropy->huf,
                                            &entropyMetadata->hufMetadata,
                                            ZSTD_literalsCompressionIsDisabled(cctxParams),
                                            workspace, wkspSize);
    FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
    entropyMetadata->fseMetadata.fseTablesSize =
        ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
                                              &prevEntropy->fse, &nextEntropy->fse,
                                              cctxParams,
                                              &entropyMetadata->fseMetadata,
                                              workspace, wkspSize);
    FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed");
    return 0;
}

/* Returns the size estimate for the literals section (header + content) of a block */
static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
                                             const ZSTD_hufCTables_t* huf,
                                             const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                             void* workspace, size_t wkspSize,
                                             int writeEntropy)
{
    unsigned* const countWksp = (unsigned*)workspace;
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);
    U32 singleStream = litSize < 256;

    if (hufMetadata->hType == set_basic) return litSize;
    else if (hufMetadata->hType == set_rle) return 1;
    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
        if (ZSTD_isError(largest)) return litSize;
        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
            if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */
            return cLitSizeEstimate + literalSectionHeaderSize;
    }   }
    assert(0); /* impossible */
    return 0;
}

/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
                        const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
                        const FSE_CTable* fseCTable,
                        const U8* additionalBits,
                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                        void* workspace, size_t wkspSize)
{
    unsigned* const countWksp = (unsigned*)workspace;
    const BYTE* ctp = codeTable;
    const BYTE* const ctStart = ctp;
    const BYTE* const ctEnd = ctStart + nbSeq;
    size_t cSymbolTypeSizeEstimateInBits = 0;
    unsigned max = maxCode;

    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
    if (type == set_basic) {
        /* We selected this encoding type, so it must be valid. */
        assert(max <= defaultMax);
        (void)defaultMax;
        cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
    } else if (type == set_rle) {
        cSymbolTypeSizeEstimateInBits = 0;
    } else if (type == set_compressed || type == set_repeat) {
        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
    }
    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) {
        return nbSeq * 10;
    }
    while (ctp < ctEnd) {
        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
        ctp++;
    }
    return cSymbolTypeSizeEstimateInBits >> 3;
}

/* Returns the size estimate for the sequences section (header + content) of a block */
static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
                                               const BYTE* llCodeTable,
                                               const BYTE* mlCodeTable,
                                               size_t nbSeq,
                                               const ZSTD_fseCTables_t* fseTables,
                                               const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                               void* workspace, size_t wkspSize,
                                               int writeEntropy)
{
    size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
    size_t cSeqSizeEstimate = 0;
    cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
                                    fseTables->offcodeCTable, NULL,
                                    OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                    workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
                                    fseTables->litlengthCTable, LL_bits,
                                    LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                    workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
                                    fseTables->matchlengthCTable, ML_bits,
                                    ML_defaultNorm, ML_defaultNormLog, MaxML,
                                    workspace, wkspSize);
    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
    return cSeqSizeEstimate + sequencesSectionHeaderSize;
}

/* Returns the size estimate for a given stream of literals, of, ll, ml */
static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
                                     const BYTE* ofCodeTable,
                                     const BYTE* llCodeTable,
                                     const BYTE* mlCodeTable,
                                     size_t nbSeq,
                                     const ZSTD_entropyCTables_t* entropy,
                                     const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                     void* workspace, size_t wkspSize,
                                     int writeLitEntropy, int writeSeqEntropy) {
    size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
                                                         &entropy->huf, &entropyMetadata->hufMetadata,
                                                         workspace, wkspSize, writeLitEntropy);
    size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
                                                         workspace, wkspSize, writeSeqEntropy);
    return seqSize + literalsSize + ZSTD_blockHeaderSize;
}

/* Builds entropy statistics and uses them for blocksize estimation.
 *
 * Returns the estimated compressed size of the seqStore, or a zstd error.
 */
static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) {
    ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
    DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
    FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
                    &zc->blockState.prevCBlock->entropy,
                    &zc->blockState.nextCBlock->entropy,
                    &zc->appliedParams,
                    entropyMetadata,
                    zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
    return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
                    seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
                    (size_t)(seqStore->sequences - seqStore->sequencesStart),
                    &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
                    (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
}

/* Returns literals bytes represented in a seqStore */
static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) {
    size_t literalsBytes = 0;
    size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
    size_t i;
    for (i = 0; i < nbSeqs; ++i) {
        seqDef seq = seqStore->sequencesStart[i];
        literalsBytes += seq.litLength;
        if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
            literalsBytes += 0x10000;
        }
    }
    return literalsBytes;
}

/* Returns match bytes represented in a seqStore */
static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
    size_t matchBytes = 0;
    size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
    size_t i;
    for (i = 0; i < nbSeqs; ++i) {
        seqDef seq = seqStore->sequencesStart[i];
        matchBytes += seq.mlBase + MINMATCH;
        if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
            matchBytes += 0x10000;
        }
    }
    return matchBytes;
}

/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
 * Stores the result in resultSeqStore.
 */
static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
                               const seqStore_t* originalSeqStore,
                                     size_t startIdx, size_t endIdx) {
    BYTE* const litEnd = originalSeqStore->lit;
    size_t literalsBytes;
    size_t literalsBytesPreceding = 0;

    *resultSeqStore = *originalSeqStore;
    if (startIdx > 0) {
        resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
        literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
    }

    /* Move longLengthPos into the correct position if necessary */
    if (originalSeqStore->longLengthType != ZSTD_llt_none) {
        if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
            resultSeqStore->longLengthType = ZSTD_llt_none;
        } else {
            resultSeqStore->longLengthPos -= (U32)startIdx;
        }
    }
    resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
    resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
    literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
    resultSeqStore->litStart += literalsBytesPreceding;
    if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
        /* This accounts for possible last literals if the derived chunk reaches the end of the block */
        resultSeqStore->lit = litEnd;
    } else {
        resultSeqStore->lit = resultSeqStore->litStart + literalsBytes;
    }
    resultSeqStore->llCode += startIdx;
    resultSeqStore->mlCode += startIdx;
    resultSeqStore->ofCode += startIdx;
}
  3099. /**
  3100. * Returns the raw offset represented by the combination of offCode, ll0, and repcode history.
  3101. * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq().
  3102. */
  3103. static U32
  3104. ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0)
  3105. {
  3106. U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0; /* [ 0 - 3 ] */
  3107. assert(STORED_IS_REPCODE(offCode));
  3108. if (adjustedOffCode == ZSTD_REP_NUM) {
  3109. /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */
  3110. assert(rep[0] > 0);
  3111. return rep[0] - 1;
  3112. }
  3113. return rep[adjustedOffCode];
  3114. }
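/* Illustrative trace of the resolution above (not from the original source),
 * assuming a repcode history rep = { 100, 200, 300 } :
 *   repcode 1, ll0 == 0 -> adjusted 0 -> rep[0] == 100
 *   repcode 1, ll0 == 1 -> adjusted 1 -> rep[1] == 200
 *   repcode 3, ll0 == 0 -> adjusted 2 -> rep[2] == 300
 *   repcode 3, ll0 == 1 -> adjusted 3 == ZSTD_REP_NUM -> rep[0] - 1 == 99
 * This mirrors the decoder's special case : when litLength == 0, repcode
 * indices shift by one, and the last slot maps to (rep[0] - 1).
 */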
/**
 * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
 * due to emission of RLE/raw blocks that disturb the offset history,
 * and replaces any repcodes within the seqStore that may be invalid.
 *
 * dRepcodes are updated as they would be on the decompression side.
 * cRepcodes are updated exactly in accordance with the seqStore.
 *
 * Note : this function assumes seq->offBase respects the following numbering scheme :
 *        0 : invalid
 *        1-3 : repcode 1-3
 *        4+ : real_offset+3
 */
static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
                                          seqStore_t* const seqStore, U32 const nbSeq) {
    U32 idx = 0;
    for (; idx < nbSeq; ++idx) {
        seqDef* const seq = seqStore->sequencesStart + idx;
        U32 const ll0 = (seq->litLength == 0);
        U32 const offCode = OFFBASE_TO_STORED(seq->offBase);
        assert(seq->offBase > 0);
        if (STORED_IS_REPCODE(offCode)) {
            U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0);
            U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0);
            /* Adjust simulated decompression repcode history if we come across a mismatch. Replace
             * the repcode with the offset it actually references, determined by the compression
             * repcode history.
             */
            if (dRawOffset != cRawOffset) {
                seq->offBase = cRawOffset + ZSTD_REP_NUM;
            }
        }
        /* Compression repcode history is always updated with values directly from the unmodified seqStore.
         * Decompression repcode history may use the modified seq->offBase value taken from the compression
         * repcode history.
         */
        ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0);
        ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
    }
}

/* ZSTD_compressSeqStore_singleBlock():
 * Compresses a seqStore into a block with a block header, into the buffer dst.
 *
 * Returns the total size of that block (including header) or a ZSTD error code.
 */
static size_t
ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
                                  repcodes_t* const dRep, repcodes_t* const cRep,
                                  void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize,
                                  U32 lastBlock, U32 isPartition)
{
    const U32 rleMaxLength = 25;
    BYTE* op = (BYTE*)dst;
    const BYTE* ip = (const BYTE*)src;
    size_t cSize;
    size_t cSeqsSize;

    /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
    repcodes_t const dRepOriginal = *dRep;
    DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
    if (isPartition)
        ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));

    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit");
    cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore,
                &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
                &zc->appliedParams,
                op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
                srcSize,
                zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
                zc->bmi2);
    FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");

    if (!zc->isFirstBlock &&
        cSeqsSize < rleMaxLength &&
        ZSTD_isRLE((BYTE const*)src, srcSize)) {
        /* We don't want to emit our first block as an RLE block even if it qualifies, because
         * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
         * This is only an issue for zstd <= v1.4.3
         */
        cSeqsSize = 1;
    }

    if (zc->seqCollector.collectSequences) {
        ZSTD_copyBlockSequences(zc);
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
        return 0;
    }

    if (cSeqsSize == 0) {
        cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
        FORWARD_IF_ERROR(cSize, "Nocompress block failed");
        DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
        *dRep = dRepOriginal; /* reset simulated decompression repcode history */
    } else if (cSeqsSize == 1) {
        cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
        FORWARD_IF_ERROR(cSize, "RLE compress block failed");
        DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
        *dRep = dRepOriginal; /* reset simulated decompression repcode history */
    } else {
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
        writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
        cSize = ZSTD_blockHeaderSize + cSeqsSize;
        DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
    }

    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}

/* Struct to keep track of where we are in our recursive calls. */
typedef struct {
    U32* splitLocations;    /* Array of split indices */
    size_t idx;             /* The current index within splitLocations being worked on */
} seqStoreSplits;

#define MIN_SEQUENCES_BLOCK_SPLITTING 300

/* Helper function to perform the recursive search for block splits.
 * Estimates the cost of the seqStore prior to split, and estimates the cost of splitting the sequences in half.
 * If it is advantageous to split, then we recurse down the two sub-blocks.
 * If not, or if an error occurred in estimation, then we do not recurse.
 *
 * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING.
 * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
 * In practice, recursion depth usually doesn't go beyond 4.
 *
 * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize
 * maximum of 128 KB, this value is actually impossible to reach.
 */
static void
ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
                             ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
{
    seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
    seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
    seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
    size_t estimatedOriginalSize;
    size_t estimatedFirstHalfSize;
    size_t estimatedSecondHalfSize;
    size_t midIdx = (startIdx + endIdx)/2;

    if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) {
        DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences");
        return;
    }
    DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
    ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
    ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
    ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
    estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
    estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
    estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
    DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
             estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
    if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
        return;
    }
    if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
        ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
        splits->splitLocations[splits->idx] = (U32)midIdx;
        splits->idx++;
        ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
    }
}

/* Base recursive function. Populates a table with intra-block partition indices that can improve compression ratio.
 *
 * Returns the number of splits made (which equals the size of the partition table - 1).
 */
static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) {
    seqStoreSplits splits = {partitions, 0};
    if (nbSeq <= 4) {
        DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split");
        /* Refuse to try to split anything with 4 or fewer sequences */
        return 0;
    }
    ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
    splits.splitLocations[splits.idx] = nbSeq;
    DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1);
    return splits.idx;
}
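/* Illustrative note on the partitions table (not from the original source) :
 * with nbSeq == 1200 and accepted splits at sequence indices 300 and 750,
 * the call above leaves
 *     partitions == { 300, 750, 1200 }
 * and returns 2. Each entry is the exclusive end index of one partition, so
 * partition i covers sequences [partitions[i-1], partitions[i]) with an
 * implicit starting bound of 0; the final entry is always nbSeq.
 */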
/* ZSTD_compressBlock_splitBlock():
 * Attempts to split a given block into multiple blocks to improve compression ratio.
 *
 * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
 */
static size_t
ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
                                       const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq)
{
    size_t cSize = 0;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    size_t i = 0;
    size_t srcBytesTotal = 0;
    U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
    seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
    seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore;
    size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);

    /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
     * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
     * separate repcode histories that simulate repcode history on compression and decompression side,
     * and use the histories to determine whether we must replace a particular repcode with its raw offset.
     *
     * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
     *    or RLE. This allows us to retrieve the offset value that an invalid repcode references within
     *    a nocompress/RLE block.
     * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
     *    the replacement offset value rather than the original repcode to update the repcode history.
     *    dRep also will be the final repcode history sent to the next block.
     *
     * See ZSTD_seqStore_resolveOffCodes() for more details.
     */
    repcodes_t dRep;
    repcodes_t cRep;
    ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
    ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
    ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));

    DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
             (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
             (unsigned)zc->blockState.matchState.nextToUpdate);

    if (numSplits == 0) {
        size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
                                                                    &dRep, &cRep,
                                                                    op, dstCapacity,
                                                                    ip, blockSize,
                                                                    lastBlock, 0 /* isPartition */);
        FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
        DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
        assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
        return cSizeSingleBlock;
    }

    ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
    for (i = 0; i <= numSplits; ++i) {
        size_t srcBytes;
        size_t cSizeChunk;
        U32 const lastPartition = (i == numSplits);
        U32 lastBlockEntireSrc = 0;

        srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
        srcBytesTotal += srcBytes;
        if (lastPartition) {
            /* This is the final partition, need to account for possible last literals */
            srcBytes += blockSize - srcBytesTotal;
            lastBlockEntireSrc = lastBlock;
        } else {
            ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
        }

        cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
                                                       &dRep, &cRep,
                                                       op, dstCapacity,
                                                       ip, srcBytes,
                                                       lastBlockEntireSrc, 1 /* isPartition */);
        DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
        FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");

        ip += srcBytes;
        op += cSizeChunk;
        dstCapacity -= cSizeChunk;
        cSize += cSizeChunk;
        *currSeqStore = *nextSeqStore;
        assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
    }
    /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes
     * for the next block.
     */
    ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
    return cSize;
}

static size_t
ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize, U32 lastBlock)
{
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    U32 nbSeq;
    size_t cSize;
    DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
    assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable);

    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
        if (bss == ZSTDbss_noCompress) {
            if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
                zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
            cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
            FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
            DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
            return cSize;
        }
        nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
    }

    cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
    FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
    return cSize;
}

static size_t
ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                            void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize, U32 frame)
{
    /* This is the upper bound for the length of an RLE block.
     * This isn't the actual upper bound.
     * Finding the real threshold needs further investigation.
     */
    const U32 rleMaxLength = 25;
    size_t cSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
             (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
             (unsigned)zc->blockState.matchState.nextToUpdate);

    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
    }

    if (zc->seqCollector.collectSequences) {
        ZSTD_copyBlockSequences(zc);
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
        return 0;
    }

    /* encode sequences and literals */
    cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
            &zc->appliedParams,
            dst, dstCapacity,
            srcSize,
            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
            zc->bmi2);

    if (frame &&
        /* We don't want to emit our first block as an RLE block even if it qualifies, because
         * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
         * This is only an issue for zstd <= v1.4.3
         */
        !zc->isFirstBlock &&
        cSize < rleMaxLength &&
        ZSTD_isRLE(ip, srcSize))
    {
        cSize = 1;
        op[0] = ip[0];
    }

out:
    if (!ZSTD_isError(cSize) && cSize > 1) {
        ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
    }
    /* We check that dictionaries have offset codes available for the first
     * block. After the first block, the offcode table might not have large
     * enough codes to represent the offsets in the data.
     */
    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}

static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const size_t bss, U32 lastBlock)
{
    DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
    if (bss == ZSTDbss_compress) {
        if (/* We don't want to emit our first block as an RLE block even if it qualifies, because
             * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
             * This is only an issue for zstd <= v1.4.3
             */
            !zc->isFirstBlock &&
            ZSTD_maybeRLE(&zc->seqStore) &&
            ZSTD_isRLE((BYTE const*)src, srcSize))
        {
            return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
        }
        /* Attempt superblock compression.
         *
         * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
         * standard ZSTD_compressBound(). This is a problem, because even if we have
         * space now, taking an extra byte now could cause us to run out of space later
         * and violate ZSTD_compressBound().
         *
         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
         *
         * In order to respect ZSTD_compressBound() we must attempt to emit a raw
         * uncompressed block in these cases:
         *   * cSize == 0: Return code for an uncompressed block.
         *   * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
         *     ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
         *     output space.
         *   * cSize >= blockBound(srcSize): We have expanded the block too much, so
         *     emit an uncompressed block.
         */
        {
            size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
            if (cSize != ERROR(dstSize_tooSmall)) {
                size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
                if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
                    ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
                    return cSize;
                }
            }
        }
    }
    DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
    /* Superblock compression failed, attempt to emit a single no compress block.
     * The decoder will be able to stream this block since it is uncompressed.
     */
    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
}

static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               U32 lastBlock)
{
    size_t cSize = 0;
    const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
    DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
             (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
    FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");

    cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
    FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");

    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}

static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
                                         ZSTD_cwksp* ws,
                                         ZSTD_CCtx_params const* params,
                                         void const* ip,
                                         void const* iend)
{
    U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
    U32 const maxDist = (U32)1 << params->cParams.windowLog;
    if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
        ZSTD_cwksp_mark_tables_dirty(ws);
        ZSTD_reduceIndex(ms, params, correction);
        ZSTD_cwksp_mark_tables_clean(ws);
        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
        else ms->nextToUpdate -= correction;
        /* invalidate dictionaries on overflow correction */
        ms->loadedDictEnd = 0;
        ms->dictMatchState = NULL;
    }
}

/*! ZSTD_compress_frameChunk() :
 *  Compress a chunk of data into one or multiple blocks.
 *  All blocks will be terminated, all input will be consumed.
 *  Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
 *  The frame is assumed to have already started (its header has already been produced).
 * @return : compressed size, or an error code
 */
static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       U32 lastFrameChunk)
{
    size_t blockSize = cctx->blockSize;
    size_t remaining = srcSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;

    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);

    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
        XXH64_update(&cctx->xxhState, src, srcSize);

    while (remaining) {
        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);

        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
                        dstSize_tooSmall,
                        "not enough space to store compressed block");
        if (remaining < blockSize) blockSize = remaining;

        ZSTD_overflowCorrectIfNeeded(
            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
        ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);

        /* Ensure hash/chain table insertion resumes no sooner than lowLimit */
        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;

        {   size_t cSize;
            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
                assert(cSize > 0);
                assert(cSize <= blockSize + ZSTD_blockHeaderSize);
            } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
                cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
                assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
            } else {
                cSize = ZSTD_compressBlock_internal(cctx,
                                op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                                ip, blockSize, 1 /* frame */);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");

                if (cSize == 0) {  /* block is not compressible */
                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
                    FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
                } else {
                    U32 const cBlockHeader = cSize == 1 ?
                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                    MEM_writeLE24(op, cBlockHeader);
                    cSize += ZSTD_blockHeaderSize;
                }
            }

            ip += blockSize;
            assert(remaining >= blockSize);
            remaining -= blockSize;
            op += cSize;
            assert(dstCapacity >= cSize);
            dstCapacity -= cSize;
            cctx->isFirstBlock = 0;
            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                     (unsigned)cSize);
    }   }

    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
    return (size_t)(op-ostart);
}

static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
{   BYTE* const op = (BYTE*)dst;
    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
    U32   const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
    U32   const checksumFlag = params->fParams.checksumFlag>0;
    U32   const windowSize = (U32)1 << params->cParams.windowLog;
    U32   const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
    BYTE  const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
    U32   const fcsCode = params->fParams.contentSizeFlag ?
                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
    size_t pos=0;

    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
                    "dst buf is too small to fit worst-case frame header size.");
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
             !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
    if (params->format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDescriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    switch(dictIDSizeCode)
    {
        default:
            assert(0); /* impossible */
            ZSTD_FALLTHROUGH;
        case 0 : break;
        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    }
    switch(fcsCode)
    {
        default:
            assert(0); /* impossible */
            ZSTD_FALLTHROUGH;
        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    }
    return pos;
}
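/* Worked example for the header construction above (illustrative, not from
 * the original source). Assuming ZSTD_f_zstd1 format, no dictionary
 * (dictIDSizeCode == 0), checksumFlag == 1, contentSizeFlag == 1,
 * windowLog == 20, and pledgedSrcSize == 100000 :
 *   - singleSegment == 1, since windowSize (1 MB) >= pledgedSrcSize
 *   - fcsCode == (100000>=256) + (100000>=65792) + (100000>=0xFFFFFFFF) == 2
 *   - frameHeaderDescriptionByte == 0 + (1<<2) + (1<<5) + (2<<6) == 0xA4
 * The header is then : 4-byte magic, the 0xA4 descriptor, no windowLog byte
 * (singleSegment), no dictID, and a 4-byte little-endian content size,
 * for a total of 9 bytes.
 */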
/* ZSTD_writeSkippableFrame() :
 * Writes out a skippable frame with the specified magic number variant (16 are supported),
 * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
 *
 * Returns the total number of bytes written, or a ZSTD error code.
 */
size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize, unsigned magicVariant) {
    BYTE* op = (BYTE*)dst;
    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
                    dstSize_tooSmall, "Not enough room for skippable frame");
    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");

    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
    MEM_writeLE32(op+4, (U32)srcSize);
    ZSTD_memcpy(op+8, src, srcSize);
    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
}
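/* Usage sketch for the public entry point above (illustrative only; the
 * payload and magic variant are arbitrary). A skippable frame lets callers
 * embed application metadata inside a zstd stream; decoders skip over it.
 * The declaration lives in the experimental section of zstd.h, hence the
 * ZSTD_STATIC_LINKING_ONLY define.
 */
#if 0
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"   /* ZSTD_writeSkippableFrame, ZSTD_isError */

static size_t embedMetadata(void* dst, size_t dstCapacity)
{
    const char payload[] = "app-metadata-v1";
    /* magic variant 3 -> frame magic == ZSTD_MAGIC_SKIPPABLE_START + 3 */
    size_t const written = ZSTD_writeSkippableFrame(dst, dstCapacity,
                                                    payload, sizeof(payload), 3);
    if (ZSTD_isError(written)) return written;   /* propagate the error code */
    return written;   /* == sizeof(payload) + the 8-byte skippable header */
}
#endif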
/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
                    "dst buf is too small to write frame trailer empty block.");
    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
        MEM_writeLE24(dst, cBlockHeader24);
        return ZSTD_blockHeaderSize;
    }
}
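/* Block header layout reminder (illustrative note, derived from the header
 * construction in ZSTD_compress_frameChunk() above) : the 3-byte little-endian
 * block header packs lastBlock in bit 0, the block type in bits 1-2, and the
 * block size in bits 3-23. Here lastBlock == 1, bt_raw == 0 and size == 0,
 * so cBlockHeader24 == 1 and the bytes emitted are 01 00 00.
 */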
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
                    "wrong cctx stage");
    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
                    parameter_unsupported,
                    "incompatible with ldm");
    cctx->externSeqStore.seq = seq;
    cctx->externSeqStore.size = nbSeq;
    cctx->externSeqStore.capacity = nbSeq;
    cctx->externSeqStore.pos = 0;
    cctx->externSeqStore.posInSequence = 0;
    return 0;
}

static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                                              void* dst, size_t dstCapacity,
                                              const void* src, size_t srcSize,
                                              U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
             cctx->stage, (unsigned)srcSize);
    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
                    "missing init (ZSTD_compressBegin)");

    if (frame && (cctx->stage==ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        assert(fhSize <= dstCapacity);
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
        ms->forceNonContiguous = 0;
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
    }

    if (!frame) {
        /* overflow check and correction for block mode */
        ZSTD_overflowCorrectIfNeeded(
            ms, &cctx->workspace, &cctx->appliedParams,
            src, (BYTE const*)src + srcSize);
    }

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
    {   size_t const cSize = frame ?
                ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
        FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
        cctx->consumedSrcSize += srcSize;
        cctx->producedCSize += (cSize + fhSize);
        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
            RETURN_ERROR_IF(
                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
                srcSize_wrong,
                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
                (unsigned)cctx->pledgedSrcSizePlusOne-1,
                (unsigned)cctx->consumedSrcSize);
        }
        return cSize + fhSize;
    }
}

size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
}

size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
{
    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
    assert(!ZSTD_checkCParams(cParams));
    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
}

size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
    {   size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
        RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block");   }

    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
}
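/* Minimal sketch (illustrative, under stated assumptions) of driving the
 * block-level API above : blocks carry no frame header or checksum, so the
 * caller must track sizes itself, and ZSTD_compressBlock() returns 0 for an
 * incompressible block, in which case the caller stores the input raw (and
 * feeds it back with ZSTD_insertBlock() on the decompression side).
 * compressOneBlock is a hypothetical helper name; the declarations require
 * ZSTD_STATIC_LINKING_ONLY.
 */
#if 0
#define ZSTD_STATIC_LINKING_ONLY
#include <assert.h>
#include "zstd.h"

/* Returns the compressed size, 0 when the block is incompressible,
 * or a ZSTD error code. */
static size_t compressOneBlock(ZSTD_CCtx* cctx, int level,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    size_t const beginStatus = ZSTD_compressBegin(cctx, level);
    if (ZSTD_isError(beginStatus)) return beginStatus;
    assert(srcSize <= ZSTD_getBlockSize(cctx));   /* block API size limit */
    return ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
}
#endif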
/*! ZSTD_loadDictionaryContent() :
 * @return : 0, or an error code
 */
static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
                                         ldmState_t* ls,
                                         ZSTD_cwksp* ws,
                                         ZSTD_CCtx_params const* params,
                                         const void* src, size_t srcSize,
                                         ZSTD_dictTableLoadMethod_e dtlm)
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;
    int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;

    /* Assert that the ms params match the params we're being given */
    ZSTD_assertEqualCParams(params->cParams, ms->cParams);

    if (srcSize > ZSTD_CHUNKSIZE_MAX) {
        /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
         * Dictionaries right at the edge will immediately trigger overflow
         * correction, but I don't want to insert extra constraints here.
         */
        U32 const maxDictSize = ZSTD_CURRENT_MAX - 1;
        /* We must have cleared our windows when our source is this large. */
        assert(ZSTD_window_isEmpty(ms->window));
        if (loadLdmDict)
            assert(ZSTD_window_isEmpty(ls->window));
        /* If the dictionary is too large, only load the suffix of the dictionary. */
        if (srcSize > maxDictSize) {
            ip = iend - maxDictSize;
            src = ip;
            srcSize = maxDictSize;
        }
    }

    DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
    ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
    ms->forceNonContiguous = params->deterministicRefPrefix;

    if (loadLdmDict) {
        ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
        ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
    }

    if (srcSize <= HASH_READ_SIZE) return 0;

    ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);

    if (loadLdmDict)
        ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);

    switch(params->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, dtlm);
        break;
    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, dtlm);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
        assert(srcSize >= HASH_READ_SIZE);
        if (ms->dedicatedDictSearch) {
            assert(ms->chainTable != NULL);
            ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
        } else {
            assert(params->useRowMatchFinder != ZSTD_ps_auto);
            if (params->useRowMatchFinder == ZSTD_ps_enable) {
                size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16);
                ZSTD_memset(ms->tagTable, 0, tagTableSize);
                ZSTD_row_update(ms, iend-HASH_READ_SIZE);
                DEBUGLOG(4, "Using row-based hash table for lazy dict");
            } else {
                ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
                DEBUGLOG(4, "Using chain-based hash table for lazy dict");
            }
        }
        break;

    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        assert(srcSize >= HASH_READ_SIZE);
        ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
        break;

    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    ms->nextToUpdate = (U32)(iend - ms->window.base);
    return 0;
}

/* A dictionary that assigns zero probability to a symbol that actually shows up
 * causes problems during FSE encoding. Mark dictionaries containing zero-probability
 * symbols as FSE_repeat_check; only dictionaries whose symbols are 100% valid
 * can be assumed valid.
 */
static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
{
    U32 s;
    if (dictMaxSymbolValue < maxSymbolValue) {
        return FSE_repeat_check;
    }
    for (s = 0; s <= maxSymbolValue; ++s) {
        if (normalizedCounter[s] == 0) {
            return FSE_repeat_check;
        }
    }
    return FSE_repeat_valid;
}

size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
                         const void* const dict, size_t dictSize)
{
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    dictPtr += 8;   /* skip magic number and dictID */
    bs->entropy.huf.repeatMode = HUF_repeat_check;

    {   unsigned maxSymbolValue = 255;
        unsigned hasZeroWeights = 1;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
                                                    dictEnd-dictPtr, &hasZeroWeights);

        /* We only set the loaded table as valid if it contains all non-zero
         * weights. Otherwise, we set it to check. */
        if (!hasZeroWeights)
            bs->entropy.huf.repeatMode = HUF_repeat_valid;

        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
        dictPtr += hufHeaderSize;
    }

    {   unsigned offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
        /* fill all offset symbols to avoid garbage at end of table */
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.offcodeCTable,
                offcodeNCount, MaxOff, offcodeLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
        dictPtr += offcodeHeaderSize;
    }

    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.matchlengthCTable,
                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
        dictPtr += matchlengthHeaderSize;
    }

    {   short litlengthNCount[MaxLL+1];
        unsigned litlengthMaxValue = MaxLL, litlengthLog;
        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.litlengthCTable,
                litlengthNCount, litlengthMaxValue, litlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
        dictPtr += litlengthHeaderSize;
    }

    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
    bs->rep[0] = MEM_readLE32(dictPtr+0);
    bs->rep[1] = MEM_readLE32(dictPtr+4);
    bs->rep[2] = MEM_readLE32(dictPtr+8);
    dictPtr += 12;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        U32 offcodeMax = MaxOff;
        if (dictContentSize <= ((U32)-1) - 128 KB) {
            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate the minimum offset code required to represent maxOffset */
        }
        /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
        bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));

        /* All repCodes must be <= dictContentSize and != 0 */
        {   U32 u;
            for (u=0; u<3; u++) {
                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
    }   }   }

    return dictPtr - (const BYTE*)dict;
}
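/* Layout recap of the entropy section parsed above (summary of the parsing
 * order implemented in ZSTD_loadCEntropy(); see the format spec linked below
 * for the normative description) :
 *   [ magic number (4) ][ dictID (4) ]
 *   [ Huffman literals table ]
 *   [ FSE offcode table ][ FSE matchLength table ][ FSE litLength table ]
 *   [ rep[0] (4, LE) ][ rep[1] (4, LE) ][ rep[2] (4, LE) ]
 *   [ dictionary content ... ]
 * The returned value is the size of everything preceding the content.
 */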
/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * @return : dictID, or an error code
 *  assumptions : magic number already checked,
 *                dictSize >= 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_cwksp* ws,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    size_t dictID;
    size_t eSize;
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize >= 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
    eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
    dictPtr += eSize;

    {
        size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
    }
    return dictID;
}

/** ZSTD_compress_insertDictionary() :
 *  @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
                               ZSTD_matchState_t* ms,
                               ldmState_t* ls,
                               ZSTD_cwksp* ws,
                         const ZSTD_CCtx_params* params,
                         const void* dict, size_t dictSize,
                               ZSTD_dictContentType_e dictContentType,
                               ZSTD_dictTableLoadMethod_e dtlm,
                               void* workspace)
{
    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
    if ((dict==NULL) || (dictSize<8)) {
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        return 0;
    }

    ZSTD_reset_compressedBlockState(bs);

    /* dict restricted modes */
    if (dictContentType == ZSTD_dct_rawContent)
        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);

    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
        if (dictContentType == ZSTD_dct_auto) {
            DEBUGLOG(4, "raw content dictionary detected");
            return ZSTD_loadDictionaryContent(
                ms, ls, ws, params, dict, dictSize, dtlm);
        }
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        assert(0);   /* impossible */
    }

    /* dict as full zstd dictionary */
    return ZSTD_loadZstdDictionary(
        bs, ms, ws, params, dict, dictSize, dtlm, workspace);
}

#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)

/*! ZSTD_compressBegin_internal() :
 * @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
                                    ZSTD_buffered_policy_e zbuff)
{
    size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
#if ZSTD_TRACE
    cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
#endif
    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
    if ( (cdict)
      && (cdict->dictContentSize > 0)
      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
        || cdict->compressionLevel == 0)
      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
    }

    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                              dictContentSize,
                                              ZSTDcrp_makeClean, zbuff) , "");
    {   size_t const dictID = cdict ?
                ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
                        cdict->dictContentSize, cdict->dictContentType, dtlm,
                        cctx->entropyWorkspace)
              : ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
                        dictContentType, dtlm, cctx->entropyWorkspace);
        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
        assert(dictID <= UINT_MAX);
        cctx->dictID = (U32)dictID;
        cctx->dictContentSize = dictContentSize;
    }
    return 0;
}

size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params,
                                    unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
    /* compression parameters verification and optimization */
    FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
    return ZSTD_compressBegin_internal(cctx,
                                       dict, dictSize, dictContentType, dtlm,
                                       cdict,
                                       params, pledgedSrcSize,
                                       ZSTDb_not_buffered);
}

/*! ZSTD_compressBegin_advanced() :
 * @return : 0, or an error code */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
                                   const void* dict, size_t dictSize,
                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
    ZSTD_CCtx_params cctxParams;
    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
    return ZSTD_compressBegin_advanced_internal(cctx,
                                                dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                                NULL /*cdict*/,
                                                &cctxParams, pledgedSrcSize);
}

size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_CCtx_params cctxParams;
    {
        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
    }
    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                                       &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}

size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
}

/*! ZSTD_writeEpilogue() :
 *  Ends a frame.
 * @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    size_t fhSize = 0;

    DEBUGLOG(4, "ZSTD_writeEpilogue");
    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");

    /* special case : empty frame */
    if (cctx->stage == ZSTDcs_init) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }

    if (cctx->appliedParams.fParams.checksumFlag) {
        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }

    cctx->stage = ZSTDcs_created;  /* return to "created but not init" status */
    return op-ostart;
}

void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
{
#if ZSTD_TRACE
    if (cctx->traceCtx && ZSTD_trace_compress_end != NULL) {
        int const streaming = cctx->inBuffSize > 0 || cctx->outBuffSize > 0 || cctx->appliedParams.nbWorkers > 0;
        ZSTD_Trace trace;
        ZSTD_memset(&trace, 0, sizeof(trace));
        trace.version = ZSTD_VERSION_NUMBER;
        trace.streaming = streaming;
        trace.dictionaryID = cctx->dictID;
        trace.dictionarySize = cctx->dictContentSize;
        trace.uncompressedSize = cctx->consumedSrcSize;
        trace.compressedSize = cctx->producedCSize + extraCSize;
        trace.params = &cctx->appliedParams;
        trace.cctx = cctx;
        ZSTD_trace_compress_end(cctx->traceCtx, &trace);
    }
    cctx->traceCtx = 0;
#else
    (void)cctx;
    (void)extraCSize;
#endif
}

size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize)
{
    size_t endResult;
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
        DEBUGLOG(4, "end of frame : controlling src size");
        RETURN_ERROR_IF(
            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
            srcSize_wrong,
            "error : pledgedSrcSize = %u, while realSrcSize = %u",
            (unsigned)cctx->pledgedSrcSizePlusOne-1,
            (unsigned)cctx->consumedSrcSize);
    }
    ZSTD_CCtx_trace(cctx, endResult);
    return cSize + endResult;
}
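/* Minimal sketch of the buffer-less streaming sequence implemented above
 * (illustrative only; compressChunked is a hypothetical helper, and chunking
 * is arbitrary). Each ZSTD_compressContinue() call should receive dst space
 * of at least ZSTD_compressBound(chunkSize), and previously submitted input
 * must remain accessible. Requires ZSTD_STATIC_LINKING_ONLY for these
 * experimental declarations.
 */
#if 0
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static size_t compressChunked(ZSTD_CCtx* cctx, int level,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize, size_t chunkSize)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t r = ZSTD_compressBegin(cctx, level);
    if (ZSTD_isError(r)) return r;
    while (srcSize > chunkSize) {
        r = ZSTD_compressContinue(cctx, op, dstCapacity, ip, chunkSize);
        if (ZSTD_isError(r)) return r;
        op += r; dstCapacity -= r; ip += chunkSize; srcSize -= chunkSize;
    }
    /* last chunk : ZSTD_compressEnd() also writes the epilogue (last block + optional checksum) */
    r = ZSTD_compressEnd(cctx, op, dstCapacity, ip, srcSize);
    if (ZSTD_isError(r)) return r;
    return (size_t)(op + r - (char*)dst);
}
#endif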
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                         const void* dict, size_t dictSize,
                               ZSTD_parameters params)
{
    DEBUGLOG(4, "ZSTD_compress_advanced");
    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
    ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
    return ZSTD_compress_advanced_internal(cctx,
                                           dst, dstCapacity,
                                           src, srcSize,
                                           dict, dictSize,
                                           &cctx->simpleApiParams);
}

/* Internal */
size_t ZSTD_compress_advanced_internal(
        ZSTD_CCtx* cctx,
        void* dst, size_t dstCapacity,
        const void* src, size_t srcSize,
        const void* dict, size_t dictSize,
        const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                         params, srcSize, ZSTDb_not_buffered) , "");
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                         const void* dict, size_t dictSize,
                               int compressionLevel)
{
    {   ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
        assert(params.fParams.contentSizeFlag == 1);
        ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
    }
    DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
}
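
/* Usage sketch (illustrative only, not part of the library) : compressing one
 * buffer with a raw dictionary through the simple dictionary API. The names
 * `dictBuf`, `srcBuf`, `srcSize`, `dictSize` are caller-side placeholders,
 * not symbols defined in this file.
 *
 *     #include <zstd.h>
 *     size_t const bound = ZSTD_compressBound(srcSize);
 *     void* const dstBuf = malloc(bound);
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     size_t const cSize = ZSTD_compress_usingDict(cctx, dstBuf, bound,
 *                                                  srcBuf, srcSize,
 *                                                  dictBuf, dictSize, 3);
 *     if (ZSTD_isError(cSize)) { ... }   // handle error
 *     ZSTD_freeCCtx(cctx);
 *
 * Note : the dictionary is reloaded on every call; when the same dictionary
 * is reused many times, a digested ZSTD_CDict (below) amortizes that cost.
 */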
size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize,
                         int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
    assert(cctx != NULL);
    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
}
size_t ZSTD_compress(void* dst, size_t dstCapacity,
               const void* src, size_t srcSize,
                     int compressionLevel)
{
    size_t result;
#if ZSTD_COMPRESS_HEAPMODE
    ZSTD_CCtx* cctx = ZSTD_createCCtx();
    RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
    result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
    ZSTD_freeCCtx(cctx);
#else
    ZSTD_CCtx ctxBody;
    ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
    result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
    ZSTD_freeCCtxContent(&ctxBody);  /* can't free ctxBody itself, as it's on stack; free only heap content */
#endif
    return result;
}
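
/* Usage sketch (illustrative only) : the simplest one-shot entry point.
 * `src`/`srcSize` stand for any caller-owned input; nothing below is defined
 * in this file.
 *
 *     #include <zstd.h>
 *     size_t const bound = ZSTD_compressBound(srcSize);   // worst-case dst size
 *     void* const dst = malloc(bound);
 *     size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, 3);
 *     if (ZSTD_isError(cSize))
 *         fprintf(stderr, "%s\n", ZSTD_getErrorName(cSize));
 *
 * Sizing dst with ZSTD_compressBound() guarantees the call cannot fail for
 * lack of output space.
 */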
/* =====   Dictionary API   ===== */

/*! ZSTD_estimateCDictSize_advanced() :
 *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
size_t ZSTD_estimateCDictSize_advanced(
        size_t dictSize, ZSTD_compressionParameters cParams,
        ZSTD_dictLoadMethod_e dictLoadMethod)
{
    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
         /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
          * in case we are using DDS with row-hash. */
         + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
                                  /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
         + (dictLoadMethod == ZSTD_dlm_byRef ? 0
            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}

size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}

size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support sizeof on NULL */
    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
    /* cdict may be in the workspace */
    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
         + ZSTD_cwksp_sizeof(&cdict->workspace);
}
static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_CCtx_params params)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
    assert(!ZSTD_checkCParams(params.cParams));
    cdict->matchState.cParams = params.cParams;
    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        cdict->dictContent = dictBuffer;
    } else {
        void* internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
        cdict->dictContent = internalBuffer;
        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;
    cdict->dictContentType = dictContentType;

    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);

    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    FORWARD_IF_ERROR(ZSTD_reset_matchState(
        &cdict->matchState,
        &cdict->workspace,
        &params.cParams,
        params.useRowMatchFinder,
        ZSTDcrp_makeClean,
        ZSTDirp_reset,
        ZSTD_resetTarget_CDict), "");
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is < 8 bytes.
     */
    {   params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
                    &params, cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }

    return 0;
}
static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_compressionParameters cParams,
                                      ZSTD_paramSwitch_e useRowMatchFinder,
                                      U32 enableDedicatedDictSearch,
                                      ZSTD_customMem customMem)
{
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;

    {   size_t const workspaceSize =
            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
            ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
            (dictLoadMethod == ZSTD_dlm_byRef ? 0
             : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
        void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
        ZSTD_cwksp ws;
        ZSTD_CDict* cdict;

        if (!workspace) {
            ZSTD_customFree(workspace, customMem);
            return NULL;
        }

        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);

        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
        assert(cdict != NULL);
        ZSTD_cwksp_move(&cdict->workspace, &ws);
        cdict->customMem = customMem;
        cdict->compressionLevel = ZSTD_NO_CLEVEL;  /* signals advanced API usage */
        cdict->useRowMatchFinder = useRowMatchFinder;
        return cdict;
    }
}
ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_compressionParameters cParams,
                                      ZSTD_customMem customMem)
{
    ZSTD_CCtx_params cctxParams;
    ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
    ZSTD_CCtxParams_init(&cctxParams, 0);
    cctxParams.cParams = cParams;
    cctxParams.customMem = customMem;
    return ZSTD_createCDict_advanced2(
        dictBuffer, dictSize,
        dictLoadMethod, dictContentType,
        &cctxParams, customMem);
}

ZSTD_CDict* ZSTD_createCDict_advanced2(
        const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod,
        ZSTD_dictContentType_e dictContentType,
        const ZSTD_CCtx_params* originalCctxParams,
        ZSTD_customMem customMem)
{
    ZSTD_CCtx_params cctxParams = *originalCctxParams;
    ZSTD_compressionParameters cParams;
    ZSTD_CDict* cdict;

    DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    if (cctxParams.enableDedicatedDictSearch) {
        cParams = ZSTD_dedicatedDictSearch_getCParams(
            cctxParams.compressionLevel, dictSize);
        ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
    } else {
        cParams = ZSTD_getCParamsFromCCtxParams(
            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    }

    if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
        /* Fall back to non-DDSS params */
        cctxParams.enableDedicatedDictSearch = 0;
        cParams = ZSTD_getCParamsFromCCtxParams(
            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    }

    DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch);
    cctxParams.cParams = cParams;
    cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);

    cdict = ZSTD_createCDict_advanced_internal(dictSize,
                        dictLoadMethod, cctxParams.cParams,
                        cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
                        customMem);
    if (!cdict) return NULL;   /* allocation failed : don't pass NULL to init below */

    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                    dict, dictSize,
                                    dictLoadMethod, dictContentType,
                                    cctxParams) )) {
        ZSTD_freeCDict(cdict);
        return NULL;
    }

    return cdict;
}
ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
                                                        ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                                        cParams, ZSTD_defaultCMem);
    if (cdict)
        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
    return cdict;
}

ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
                                                        ZSTD_dlm_byRef, ZSTD_dct_auto,
                                                        cParams, ZSTD_defaultCMem);
    if (cdict)
        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
    return cdict;
}
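
/* Usage sketch (illustrative only) : the two constructors above differ only
 * in dictionary lifetime. ZSTD_createCDict() copies `dictBuf` into the CDict,
 * so the caller may free it immediately; ZSTD_createCDict_byReference() only
 * keeps the pointer, so `dictBuf2` must outlive the CDict. Both buffers are
 * caller-side placeholders.
 *
 *     ZSTD_CDict* const cd1 = ZSTD_createCDict(dictBuf, dictSize, 3);
 *     free(dictBuf);                     // fine : content was copied
 *
 *     ZSTD_CDict* const cd2 = ZSTD_createCDict_byReference(dictBuf2, dictSize2, 3);
 *     // dictBuf2 must stay valid until ZSTD_freeCDict(cd2)
 */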
size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support free on NULL */
    {   ZSTD_customMem const cMem = cdict->customMem;
        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
        ZSTD_cwksp_free(&cdict->workspace, cMem);
        if (!cdictInWorkspace) {
            ZSTD_customFree(cdict, cMem);
        }
        return 0;
    }
}
/*! ZSTD_initStaticCDict() :
 *  Generate a digested dictionary in provided memory area.
 *  workspace: The memory area to emplace the dictionary into.
 *             Provided pointer must be 8-bytes aligned.
 *             It must outlive dictionary usage.
 *  workspaceSize: Use ZSTD_estimateCDictSize()
 *                 to determine how large workspace must be.
 *  cParams : use ZSTD_getCParams() to transform a compression level
 *            into its relevant cParams.
 * @return : pointer to ZSTD_CDict, or NULL if error (size too small)
 *  Note : there is no corresponding "free" function.
 *         Since workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
                                 void* workspace, size_t workspaceSize,
                           const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType,
                                 ZSTD_compressionParameters cParams)
{
    ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
    /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0
                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
                            + matchStateSize;
    ZSTD_CDict* cdict;
    ZSTD_CCtx_params params;

    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */

    {   ZSTD_cwksp ws;
        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
        if (cdict == NULL) return NULL;
        ZSTD_cwksp_move(&cdict->workspace, &ws);
    }

    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;

    ZSTD_CCtxParams_init(&params, 0);
    params.cParams = cParams;
    params.useRowMatchFinder = useRowMatchFinder;
    cdict->useRowMatchFinder = useRowMatchFinder;

    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                              dict, dictSize,
                                              dictLoadMethod, dictContentType,
                                              params) ))
        return NULL;

    return cdict;
}
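
/* Usage sketch (illustrative only) : allocation-free CDict setup, e.g. for
 * embedded targets. `staticMem` and `dictBuf` are caller-provided
 * placeholders; the estimate and the init deliberately use the same cParams.
 *
 *     ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, 0, dictSize);
 *     size_t const wkspSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
 *     void* const staticMem = malloc(wkspSize);    // or a static buffer, 8-byte aligned
 *     const ZSTD_CDict* const cdict = ZSTD_initStaticCDict(staticMem, wkspSize,
 *                                             dictBuf, dictSize,
 *                                             ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams);
 *     if (cdict == NULL) { ... }   // workspace too small or misaligned
 *     // no ZSTD_freeCDict() : release staticMem itself when done
 */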
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
    assert(cdict != NULL);
    return cdict->matchState.cParams;
}

/*! ZSTD_getDictID_fromCDict() :
 *  Provides the dictID of the dictionary loaded into `cdict`.
 *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
 *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;
    return cdict->dictID;
}
/* ZSTD_compressBegin_usingCDict_internal() :
 * Implementation of various ZSTD_compressBegin_usingCDict* functions.
 */
static size_t ZSTD_compressBegin_usingCDict_internal(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    ZSTD_CCtx_params cctxParams;
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal");
    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
    /* Initialize the cctxParams from the cdict */
    {   ZSTD_parameters params;
        params.fParams = fParams;
        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
                        || cdict->compressionLevel == 0 ) ?
                ZSTD_getCParamsFromCDict(cdict)
              : ZSTD_getCParams(cdict->compressionLevel,
                                pledgedSrcSize,
                                cdict->dictContentSize);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
    }
    /* Increase window log to fit the entire dictionary and source if the
     * source size is known. Limit the increase to 19, which is the
     * window log for compression level 1 with the largest source size.
     */
    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
        cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
    }
    return ZSTD_compressBegin_internal(cctx,
                                       NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                       cdict,
                                       &cctxParams, pledgedSrcSize,
                                       ZSTDb_not_buffered);
}

/* ZSTD_compressBegin_usingCDict_advanced() :
 * This function is DEPRECATED.
 * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
}

/* ZSTD_compressBegin_usingCDict() :
 * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
}
/*! ZSTD_compress_usingCDict_internal():
 * Implementation of various ZSTD_compress_usingCDict* functions.
 */
static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), "");   /* will check if cdict != NULL */
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}

/*! ZSTD_compress_usingCDict_advanced():
 * This function is DEPRECATED.
 */
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
    return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}

/*! ZSTD_compress_usingCDict() :
 *  Compression using a digested Dictionary.
 *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
 *  Note that compression parameters are decided at CDict creation time
 *  while frame parameters are hardcoded */
size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const ZSTD_CDict* cdict)
{
    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}
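
/* Usage sketch (illustrative only) : typical CDict lifecycle when many inputs
 * share one dictionary. `dictBuf`, `inputs[]`, `outBuf`, `n` are placeholders.
 *
 *     ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
 *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
 *     for (size_t i = 0; i < n; i++) {
 *         size_t const cSize = ZSTD_compress_usingCDict(cctx,
 *                                 outBuf, outCapacity,
 *                                 inputs[i].ptr, inputs[i].size, cdict);
 *         if (ZSTD_isError(cSize)) { ... }
 *     }
 *     ZSTD_freeCDict(cdict);
 *     ZSTD_freeCCtx(cctx);
 *
 * The dictionary is digested once at ZSTD_createCDict() time, so each call
 * here skips the per-frame dictionary load that ZSTD_compress_usingDict() pays.
 */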
/* ******************************************************************
*  Streaming
********************************************************************/

ZSTD_CStream* ZSTD_createCStream(void)
{
    DEBUGLOG(3, "ZSTD_createCStream");
    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
}

ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
{
    return ZSTD_initStaticCCtx(workspace, workspaceSize);
}

ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
{   /* CStream and CCtx are now same object */
    return ZSTD_createCCtx_advanced(customMem);
}

size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
{
    return ZSTD_freeCCtx(zcs);   /* same object */
}

/*======   Initialization   ======*/

size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }

size_t ZSTD_CStreamOutSize(void)
{
    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
}
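
/* Usage sketch (illustrative only) : sizing streaming buffers with the
 * recommended values. With an output buffer of at least ZSTD_CStreamOutSize()
 * bytes, the compressor can always flush at least one complete block.
 * See the compression-loop sketch after ZSTD_compressStream2() below.
 *
 *     size_t const inCap  = ZSTD_CStreamInSize();
 *     size_t const outCap = ZSTD_CStreamOutSize();
 *     void*  const inBuf  = malloc(inCap);
 *     void*  const outBuf = malloc(outCap);
 */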
static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
{
    if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
        return ZSTD_cpm_attachDict;
    else
        return ZSTD_cpm_noAttachDict;
}

/* ZSTD_resetCStream():
 * pledgedSrcSize == 0 means "unknown" */
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
{
    /* temporary : 0 interpreted as "unknown" during transition period.
     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
     * 0 will be interpreted as "empty" in the future.
     */
    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    return 0;
}
/*! ZSTD_initCStream_internal() :
 *  Note : for lib/compress only. Used by zstdmt_compress.c.
 *  Assumption 1 : params are valid
 *  Assumption 2 : either dict, or cdict, is defined, not both */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
                    const ZSTD_CCtx_params* params,
                    unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_internal");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    zcs->requestedParams = *params;
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
    if (dict) {
        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    } else {
        /* Dictionary is cleared if !cdict */
        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    }
    return 0;
}

/* ZSTD_initCStream_usingCDict_advanced() :
 * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
                                            const ZSTD_CDict* cdict,
                                            ZSTD_frameParameters fParams,
                                            unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    zcs->requestedParams.fParams = fParams;
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    return 0;
}

/* note : cdict must outlive compression session */
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    return 0;
}
/* ZSTD_initCStream_advanced() :
 * pledgedSrcSize must be exact.
 * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
 * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
                                 const void* dict, size_t dictSize,
                                 ZSTD_parameters params, unsigned long long pss)
{
    /* for compatibility with older programs relying on this behavior.
     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
     * This line will be removed in the future.
     */
    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
    DEBUGLOG(4, "ZSTD_initCStream_advanced");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
    ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    return 0;
}

size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    return 0;
}

size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
    /* temporary : 0 interpreted as "unknown" during transition period.
     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
     * 0 will be interpreted as "empty" in the future.
     */
    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
    DEBUGLOG(4, "ZSTD_initCStream_srcSize");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    return 0;
}

size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_initCStream");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
    return 0;
}
/*======   Compression   ======*/

static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
{
    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
    if (hintInSize==0) hintInSize = cctx->blockSize;
    return hintInSize;
}

/** ZSTD_compressStream_generic():
 *  internal function for all *compressStream*() variants
 *  non-static, because can be called from zstdmt_compress.c
 * @return : hint size for next input */
static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                                          ZSTD_outBuffer* output,
                                          ZSTD_inBuffer* input,
                                          ZSTD_EndDirective const flushMode)
{
    const char* const istart = (const char*)input->src;
    const char* const iend = input->size != 0 ? istart + input->size : istart;
    const char* ip = input->pos != 0 ? istart + input->pos : istart;
    char* const ostart = (char*)output->dst;
    char* const oend = output->size != 0 ? ostart + output->size : ostart;
    char* op = output->pos != 0 ? ostart + output->pos : ostart;
    U32 someMoreWork = 1;

    /* check expectations */
    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
    if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
        assert(zcs->inBuff != NULL);
        assert(zcs->inBuffSize > 0);
    }
    if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
        assert(zcs->outBuff != NULL);
        assert(zcs->outBuffSize > 0);
    }
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);
    assert((U32)flushMode <= (U32)ZSTD_e_end);

    while (someMoreWork) {
        switch(zcs->streamStage)
        {
        case zcss_init:
            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");

        case zcss_load:
            if ( (flushMode == ZSTD_e_end)
              && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip)     /* Enough output space */
                || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)  /* OR we are allowed to return dstSizeTooSmall */
              && (zcs->inBuffPos == 0) ) {
                /* shortcut to compression pass directly into output buffer */
                size_t const cSize = ZSTD_compressEnd(zcs,
                                                op, oend-op, ip, iend-ip);
                DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
                ip = iend;
                op += cSize;
                zcs->frameEnded = 1;
                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                someMoreWork = 0; break;
            }
            /* complete loading into inBuffer in buffered mode */
            if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
                size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
                size_t const loaded = ZSTD_limitCopy(
                                        zcs->inBuff + zcs->inBuffPos, toLoad,
                                        ip, iend-ip);
                zcs->inBuffPos += loaded;
                if (loaded != 0)
                    ip += loaded;
                if ( (flushMode == ZSTD_e_continue)
                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
                    /* not enough input to fill full block : stop here */
                    someMoreWork = 0; break;
                }
                if ( (flushMode == ZSTD_e_flush)
                  && (zcs->inBuffPos == zcs->inToCompress) ) {
                    /* empty */
                    someMoreWork = 0; break;
                }
            }
            /* compress current block (note : this stage cannot be stopped in the middle) */
            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
            {   int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
                void* cDst;
                size_t cSize;
                size_t oSize = oend-op;
                size_t const iSize = inputBuffered
                    ? zcs->inBuffPos - zcs->inToCompress
                    : MIN((size_t)(iend - ip), zcs->blockSize);
                if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
                    cDst = op;   /* compress into output buffer, to skip flush stage */
                else
                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
                if (inputBuffered) {
                    unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
                    cSize = lastBlock ?
                            ZSTD_compressEnd(zcs, cDst, oSize,
                                        zcs->inBuff + zcs->inToCompress, iSize) :
                            ZSTD_compressContinue(zcs, cDst, oSize,
                                        zcs->inBuff + zcs->inToCompress, iSize);
                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
                    zcs->frameEnded = lastBlock;
                    /* prepare next block */
                    zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
                    if (zcs->inBuffTarget > zcs->inBuffSize)
                        zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
                    DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
                             (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
                    if (!lastBlock)
                        assert(zcs->inBuffTarget <= zcs->inBuffSize);
                    zcs->inToCompress = zcs->inBuffPos;
                } else {
                    unsigned const lastBlock = (ip + iSize == iend);
                    assert(flushMode == ZSTD_e_end /* Already validated */);
                    cSize = lastBlock ?
                            ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
                            ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
                    /* Consume the input prior to error checking to mirror buffered mode. */
                    if (iSize > 0)
                        ip += iSize;
                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
                    zcs->frameEnded = lastBlock;
                    if (lastBlock)
                        assert(ip == iend);
                }
                if (cDst == op) {  /* no need to flush */
                    op += cSize;
                    if (zcs->frameEnded) {
                        DEBUGLOG(5, "Frame completed directly in outBuffer");
                        someMoreWork = 0;
                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                    }
                    break;
                }
                zcs->outBuffContentSize = cSize;
                zcs->outBuffFlushedSize = 0;
                zcs->streamStage = zcss_flush;   /* pass-through to flush stage */
            }
            ZSTD_FALLTHROUGH;
        case zcss_flush:
            DEBUGLOG(5, "flush stage");
            assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
                                    zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
                         (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
                if (flushed)
                    op += flushed;
                zcs->outBuffFlushedSize += flushed;
                if (toFlush!=flushed) {
                    /* flush not fully completed, presumably because dst is too small */
                    assert(op==oend);
                    someMoreWork = 0;
                    break;
                }
                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
                if (zcs->frameEnded) {
                    DEBUGLOG(5, "Frame completed on flush");
                    someMoreWork = 0;
                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                    break;
                }
                zcs->streamStage = zcss_load;
                break;
            }

        default: /* impossible */
            assert(0);
        }
    }

    input->pos = ip - istart;
    output->pos = op - ostart;
    if (zcs->frameEnded) return 0;
    return ZSTD_nextInputSizeHint(zcs);
}
static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers >= 1) {
        assert(cctx->mtctx != NULL);
        return ZSTDMT_nextInputSizeHint(cctx->mtctx);
    }
#endif
    return ZSTD_nextInputSizeHint(cctx);
}

size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
    return ZSTD_nextInputSizeHint_MTorST(zcs);
}
/* After a compression call set the expected input/output buffer.
 * This is validated at the start of the next compression call.
 */
static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
{
    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
        cctx->expectedInBuffer = *input;
    }
    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
        cctx->expectedOutBufferSize = output->size - output->pos;
    }
}

/* Validate that the input/output buffers match the expectations set by
 * ZSTD_setBufferExpectations.
 */
static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
                                        ZSTD_outBuffer const* output,
                                        ZSTD_inBuffer const* input,
                                        ZSTD_EndDirective endOp)
{
    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
        ZSTD_inBuffer const expect = cctx->expectedInBuffer;
        if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
        if (endOp != ZSTD_e_end)
            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
    }
    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
        size_t const outBufferSize = output->size - output->pos;
        if (cctx->expectedOutBufferSize != outBufferSize)
            RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
    }
    return 0;
}
static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
                                             ZSTD_EndDirective endOp,
                                             size_t inSize)
{
    ZSTD_CCtx_params params = cctx->requestedParams;
    ZSTD_prefixDict const prefixDict = cctx->prefixDict;
    FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , "");   /* Init the local dict if present. */
    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
    assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
    if (cctx->cdict && !cctx->localDict.cdict) {
        /* Let the cdict's compression level take priority over the requested params.
         * But do not take the cdict's compression level if the "cdict" is actually a localDict
         * generated from ZSTD_initLocalDict().
         */
        params.compressionLevel = cctx->cdict->compressionLevel;
    }
    DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-fix pledgedSrcSize */

    {   size_t const dictSize = prefixDict.dict
                ? prefixDict.dictSize
                : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &params, cctx->pledgedSrcSizePlusOne-1,
                dictSize, mode);
    }

    params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
    params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
    params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);

#ifdef ZSTD_MULTITHREAD
    if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
        params.nbWorkers = 0;   /* do not invoke multi-threading when src size is too small */
    }
    if (params.nbWorkers > 0) {
#if ZSTD_TRACE
        cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
#endif
        /* mt context creation */
        if (cctx->mtctx == NULL) {
            DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
                        params.nbWorkers);
            cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem, cctx->pool);
            RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!");
        }
        /* mt compression */
        DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
        FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
                    cctx->mtctx,
                    prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                    cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , "");
        cctx->dictID = cctx->cdict ? cctx->cdict->dictID : 0;
        cctx->dictContentSize = cctx->cdict ? cctx->cdict->dictContentSize : prefixDict.dictSize;
        cctx->consumedSrcSize = 0;
        cctx->producedCSize = 0;
        cctx->streamStage = zcss_load;
        cctx->appliedParams = params;
    } else
#endif
    {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
        assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
        FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
                cctx->cdict,
                &params, pledgedSrcSize,
                ZSTDb_buffered) , "");
        assert(cctx->appliedParams.nbWorkers == 0);
        cctx->inToCompress = 0;
        cctx->inBuffPos = 0;
        if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
            /* for small input: avoid automatic flush on reaching end of block, since
             * it would require to add a 3-bytes null block to end frame
             */
            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
        } else {
            cctx->inBuffTarget = 0;
        }
        cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
        cctx->streamStage = zcss_load;
        cctx->frameEnded = 0;
    }
    return 0;
}
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
                             ZSTD_outBuffer* output,
                             ZSTD_inBuffer* input,
                             ZSTD_EndDirective endOp)
{
    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
    /* check conditions */
    RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
    RETURN_ERROR_IF(input->pos  > input->size, srcSize_wrong, "invalid input buffer");
    RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
    assert(cctx != NULL);

    /* transparent initialization stage */
    if (cctx->streamStage == zcss_init) {
        FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
        ZSTD_setBufferExpectations(cctx, output, input);   /* Set initial buffer expectations now that we've initialized */
    }
    /* end of transparent initialization stage */

    FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
    /* compression stage */
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        size_t flushMin;
        if (cctx->cParamsChanged) {
            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
            cctx->cParamsChanged = 0;
        }
        for (;;) {
            size_t const ipos = input->pos;
            size_t const opos = output->pos;
            flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
            cctx->consumedSrcSize += (U64)(input->pos - ipos);
            cctx->producedCSize += (U64)(output->pos - opos);
            if ( ZSTD_isError(flushMin)
              || (endOp == ZSTD_e_end && flushMin == 0) ) {   /* compression completed */
                if (flushMin == 0)
                    ZSTD_CCtx_trace(cctx, 0);
                ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
            }
            FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed");

            if (endOp == ZSTD_e_continue) {
                /* We only require some progress with ZSTD_e_continue, not maximal progress.
                 * We're done if we've consumed or produced any bytes, or either buffer is
                 * full.
                 */
                if (input->pos != ipos || output->pos != opos || input->pos == input->size || output->pos == output->size)
                    break;
            } else {
                assert(endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
                /* We require maximal progress. We're done when the flush is complete or the
                 * output buffer is full.
                 */
                if (flushMin == 0 || output->pos == output->size)
                    break;
            }
        }
        DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
        /* Either we don't require maximum forward progress, we've finished the
         * flush, or we are out of output space.
         */
        assert(endOp == ZSTD_e_continue || flushMin == 0 || output->pos == output->size);
        ZSTD_setBufferExpectations(cctx, output, input);
        return flushMin;
    }
#endif
    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
    DEBUGLOG(5, "completed ZSTD_compressStream2");
    ZSTD_setBufferExpectations(cctx, output, input);
    return cctx->outBuffContentSize - cctx->outBuffFlushedSize;   /* remaining to flush */
}
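
/* Usage sketch (illustrative only) : the canonical streaming loop, reading
 * from a FILE* `fin` and writing to `fout` (placeholders), with buffers sized
 * per the ZSTD_CStreamInSize()/ZSTD_CStreamOutSize() sketch above.
 *
 *     for (;;) {
 *         size_t const readSz = fread(inBuf, 1, inCap, fin);
 *         int    const lastChunk = (readSz < inCap);
 *         ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
 *         ZSTD_inBuffer in = { inBuf, readSz, 0 };
 *         int finished;
 *         do {
 *             ZSTD_outBuffer out = { outBuf, outCap, 0 };
 *             size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, mode);
 *             if (ZSTD_isError(remaining)) { ... }
 *             fwrite(outBuf, 1, out.pos, fout);
 *             finished = lastChunk ? (remaining == 0) : (in.pos == in.size);
 *         } while (!finished);
 *         if (lastChunk) break;
 *     }
 *
 * With ZSTD_e_end, the return value is the number of bytes still waiting to
 * be flushed; looping until it reaches 0 guarantees a complete frame.
 */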
size_t ZSTD_compressStream2_simpleArgs (
                ZSTD_CCtx* cctx,
                void* dst, size_t dstCapacity, size_t* dstPos,
          const void* src, size_t srcSize, size_t* srcPos,
                ZSTD_EndDirective endOp)
{
    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
    /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
    *dstPos = output.pos;
    *srcPos = input.pos;
    return cErr;
}
size_t ZSTD_compress2(ZSTD_CCtx* cctx,
                      void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize)
{
    ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
    ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
    DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    /* Enable stable input/output buffers. */
    cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
    cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
    {   size_t oPos = 0;
        size_t iPos = 0;
        size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
                                        dst, dstCapacity, &oPos,
                                        src, srcSize, &iPos,
                                        ZSTD_e_end);
        /* Reset to the original values. */
        cctx->requestedParams.inBufferMode = originalInBufferMode;
        cctx->requestedParams.outBufferMode = originalOutBufferMode;
        FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
        if (result != 0) {  /* compression not completed, due to lack of output space */
            assert(oPos == dstCapacity);
            RETURN_ERROR(dstSize_tooSmall, "");
        }
        assert(iPos == srcSize);   /* all input is expected consumed */
        return oPos;
    }
}
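
/* Usage sketch (illustrative only) : ZSTD_compress2() is the one-shot entry
 * point for the advanced parameter API; parameters set on the cctx persist
 * across calls until reset. `src`, `srcSize`, `dst` are placeholders.
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
 *     size_t const cSize = ZSTD_compress2(cctx, dst, ZSTD_compressBound(srcSize),
 *                                         src, srcSize);
 *     if (ZSTD_isError(cSize)) { ... }
 *     ZSTD_freeCCtx(cctx);
 */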
typedef struct {
    U32 idx;             /* Index in array of ZSTD_Sequence */
    U32 posInSequence;   /* Position within sequence at idx */
    size_t posInSrc;     /* Number of bytes given by sequences provided so far */
} ZSTD_sequencePosition;

/* ZSTD_validateSequence() :
 * @offCode : is presumed to follow format required by ZSTD_storeSeq()
 * @returns a ZSTD error code if sequence is not valid
 */
static size_t
ZSTD_validateSequence(U32 offCode, U32 matchLength,
                      size_t posInSrc, U32 windowLog, size_t dictSize)
{
    U32 const windowSize = 1 << windowLog;
    /* posInSrc represents the amount of data the decoder would decode up to this point.
     * As long as the amount of data decoded is less than or equal to window size, offsets may be
     * larger than the total length of output decoded in order to reference the dict, even larger than
     * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
     */
    size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
    RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!");
    RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small");
    return 0;
}
/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
{
    U32 offCode = STORE_OFFSET(rawOffset);

    if (!ll0 && rawOffset == rep[0]) {
        offCode = STORE_REPCODE_1;
    } else if (rawOffset == rep[1]) {
        offCode = STORE_REPCODE(2 - ll0);
    } else if (rawOffset == rep[2]) {
        offCode = STORE_REPCODE(3 - ll0);
    } else if (ll0 && rawOffset == rep[0] - 1) {
        offCode = STORE_REPCODE_3;
    }
    return offCode;
}
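
/* Worked example (illustrative, derived from the branches above) : with
 * repcode history rep = {8, 4, 2} :
 *  - rawOffset 8, ll0=0  ->  STORE_REPCODE_1    (matches rep[0])
 *  - rawOffset 4, ll0=0  ->  STORE_REPCODE(2)   (matches rep[1])
 *  - rawOffset 4, ll0=1  ->  STORE_REPCODE(1)   (rep indices shift when litLength == 0)
 *  - rawOffset 7, ll0=1  ->  STORE_REPCODE_3    (rep[0] - 1 special case)
 *  - rawOffset 5, either ll0 -> STORE_OFFSET(5) (no repcode applies)
 */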
/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
 * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
 */
static size_t
ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
                                               ZSTD_sequencePosition* seqPos,
                                         const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                         const void* src, size_t blockSize)
{
    U32 idx = seqPos->idx;
    BYTE const* ip = (BYTE const*)(src);
    const BYTE* const iend = ip + blockSize;
    repcodes_t updatedRepcodes;
    U32 dictSize;

    if (cctx->cdict) {
        dictSize = (U32)cctx->cdict->dictContentSize;
    } else if (cctx->prefixDict.dict) {
        dictSize = (U32)cctx->prefixDict.dictSize;
    } else {
        dictSize = 0;
    }
    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
    /* bounds check before dereferencing : guards against a missing block delimiter */
    for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
        U32 const litLength = inSeqs[idx].litLength;
        U32 const ll0 = (litLength == 0);
        U32 const matchLength = inSeqs[idx].matchLength;
        U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
        ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);

        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
        if (cctx->appliedParams.validateSequences) {
            seqPos->posInSrc += litLength + matchLength;
            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
                                                cctx->appliedParams.cParams.windowLog, dictSize),
                                                "Sequence validation failed");
        }
        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
        ip += matchLength + litLength;
    }
    RETURN_ERROR_IF(idx == inSeqsSize, corruption_detected,
                    "Block delimiter not found in sequences");
    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));

    if (inSeqs[idx].litLength) {
        DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
        ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
        ip += inSeqs[idx].litLength;
        seqPos->posInSrc += inSeqs[idx].litLength;
    }
    RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
    seqPos->idx = idx+1;
    return 0;
}
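
/* Illustrative layout (not a test vector) : with ZSTD_sf_explicitBlockDelimiters,
 * each block's sequences end with a delimiter entry whose offset and matchLength
 * are both 0, and whose litLength covers the block's trailing literals, e.g. :
 *
 *     ZSTD_Sequence const seqs[] = {
 *         { 100, 5, 12, 0 },   // offset=100, litLength=5, matchLength=12
 *         {  20, 3,  8, 0 },
 *         {   0, 4,  0, 0 },   // delimiter : 4 trailing literals end the block
 *     };
 *
 * The loop above consumes entries until that delimiter, then checks that the
 * bytes covered add up exactly to blockSize.
 */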
  5292. /* Returns the number of bytes to move the current read position back by. Only non-zero
  5293. * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
  5294. * went wrong.
  5295. *
  5296. * This function will attempt to scan through blockSize bytes represented by the sequences
  5297. * in inSeqs, storing any (partial) sequences.
  5298. *
  5299. * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
  5300. * avoid splitting a match, or to avoid splitting a match such that it would produce a match
  5301. * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
  5302. */
  5303. static size_t
  5304. ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
  5305. const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
  5306. const void* src, size_t blockSize)
  5307. {
  5308. U32 idx = seqPos->idx;
  5309. U32 startPosInSequence = seqPos->posInSequence;
  5310. U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
  5311. size_t dictSize;
  5312. BYTE const* ip = (BYTE const*)(src);
  5313. BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
  5314. repcodes_t updatedRepcodes;
  5315. U32 bytesAdjustment = 0;
  5316. U32 finalMatchSplit = 0;
  5317. if (cctx->cdict) {
  5318. dictSize = cctx->cdict->dictContentSize;
  5319. } else if (cctx->prefixDict.dict) {
  5320. dictSize = cctx->prefixDict.dictSize;
  5321. } else {
  5322. dictSize = 0;
  5323. }
  5324. DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
  5325. DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
  5326. ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
  5327. while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
  5328. const ZSTD_Sequence currSeq = inSeqs[idx];
  5329. U32 litLength = currSeq.litLength;
  5330. U32 matchLength = currSeq.matchLength;
  5331. U32 const rawOffset = currSeq.offset;
  5332. U32 offCode;
  5333. /* Modify the sequence depending on where endPosInSequence lies */
        if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
            if (startPosInSequence >= litLength) {
                startPosInSequence -= litLength;
                litLength = 0;
                matchLength -= startPosInSequence;
            } else {
                litLength -= startPosInSequence;
            }
            /* Move to the next sequence */
            endPosInSequence -= currSeq.litLength + currSeq.matchLength;
            startPosInSequence = 0;
            idx++;
        } else {
            /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
               does not reach the end of the match. So, we have to split the sequence */
            DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
                     currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
            if (endPosInSequence > litLength) {
                U32 firstHalfMatchLength;
                litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
                firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
                if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
                    /* Only ever split the match if it is larger than the block size */
                    U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
                    if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
                        /* Move endPosInSequence backward so that it creates a match of minMatch length */
                        endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        firstHalfMatchLength -= bytesAdjustment;
                    }
                    matchLength = firstHalfMatchLength;
                    /* Flag that we split the last match - after storing the sequence, exit the loop,
                       but keep the value of endPosInSequence */
                    finalMatchSplit = 1;
                } else {
                    /* Move the position in sequence backwards so that we don't split the match, and break to store
                     * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
                     * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
                     * would cause the first half of the match to be too small.
                     */
                    bytesAdjustment = endPosInSequence - currSeq.litLength;
                    endPosInSequence = currSeq.litLength;
                    break;
                }
            } else {
                /* This sequence ends inside the literals; break to store the last literals */
                break;
            }
        }
        /* Check if this offset can be represented with a repcode */
        {   U32 const ll0 = (litLength == 0);
            offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
            ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
        }

        if (cctx->appliedParams.validateSequences) {
            seqPos->posInSrc += litLength + matchLength;
            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
                                                   cctx->appliedParams.cParams.windowLog, dictSize),
                             "Sequence validation failed");
        }
        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
        ip += matchLength + litLength;
    }
    DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
    assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
    seqPos->idx = idx;
    seqPos->posInSequence = endPosInSequence;
    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));

    iend -= bytesAdjustment;
    if (ip != iend) {
        /* Store any last literals */
        U32 lastLLSize = (U32)(iend - ip);
        assert(ip <= iend);
        DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
        ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
        seqPos->posInSrc += lastLLSize;
    }

    return bytesAdjustment;
}
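
/* Worked example (illustrative, not from the sources): with no block delimiters,
 * a block boundary may land mid-sequence. Suppose 50 bytes of block budget remain
 * and the current sequence is {litLength = 10, matchLength = 100}: endPosInSequence
 * (50) ends 40 bytes into the match. Since matchLength does not exceed the block
 * size, the match is not split; instead endPosInSequence is walked back to the end
 * of the literals (10) and the 40-byte shortfall is returned as bytesAdjustment,
 * shrinking this block so that the whole match is emitted at the start of the next
 * block. A genuine split only happens when a single match is larger than the block
 * size itself. */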

typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                       const void* src, size_t blockSize);

static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
{
    ZSTD_sequenceCopier sequenceCopier = NULL;
    assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
    if (mode == ZSTD_sf_explicitBlockDelimiters) {
        return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
    } else if (mode == ZSTD_sf_noBlockDelimiters) {
        return ZSTD_copySequencesToSeqStoreNoBlockDelim;
    }
    assert(sequenceCopier != NULL);
    return sequenceCopier;
}

/* Compress, block-by-block, all of the sequences given.
 *
 * Returns the cumulative size of all compressed blocks (including their headers),
 * otherwise a ZSTD error.
 */
static size_t
ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
                                const void* src, size_t srcSize)
{
    size_t cSize = 0;
    U32 lastBlock;
    size_t blockSize;
    size_t compressedSeqsSize;
    size_t remaining = srcSize;
    ZSTD_sequencePosition seqPos = {0, 0, 0};

    BYTE const* ip = (BYTE const*)src;
    BYTE* op = (BYTE*)dst;
    ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);

    DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
    /* Special case: empty frame */
    if (remaining == 0) {
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
        cSize += ZSTD_blockHeaderSize;
    }

    while (remaining) {
        size_t cBlockSize;
        size_t additionalByteAdjustment;
        lastBlock = remaining <= cctx->blockSize;
        blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
        ZSTD_resetSeqStore(&cctx->seqStore);
        DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);

        additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
        FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
        blockSize -= additionalByteAdjustment;

        /* If blocks are too small, emit as a nocompress block */
        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
            DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
            cSize += cBlockSize;
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            continue;
        }

        compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
                                &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
                                &cctx->appliedParams,
                                op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
                                blockSize,
                                cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
                                cctx->bmi2);
        FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
        DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);

        if (!cctx->isFirstBlock &&
            ZSTD_maybeRLE(&cctx->seqStore) &&
            ZSTD_isRLE((BYTE const*)src, srcSize)) {
            /* We don't want to emit our first block as an RLE block even if it qualifies,
             * because doing so would cause the decoder (cli only) to throw a
             * "should consume all input" error. This is only an issue for zstd <= v1.4.3.
             */
            compressedSeqsSize = 1;
        }

        if (compressedSeqsSize == 0) {
            /* ZSTD_noCompressBlock writes the block header as well */
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
            DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
        } else if (compressedSeqsSize == 1) {
            cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
            DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
        } else {
            U32 cBlockHeader;
            /* Error checking and repcodes update */
            ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

            /* Write block header into beginning of block */
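            /* Per the zstd format, a block header occupies 3 bytes, little-endian:
             *   bit  0    : Last_Block flag
             *   bits 1-2  : Block_Type (bt_compressed here)
             *   bits 3-23 : Block_Size (size of the compressed payload)
             */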
            cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
            MEM_writeLE24(op, cBlockHeader);
            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
            DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
        }

        cSize += cBlockSize;
        DEBUGLOG(4, "cSize running total: %zu", cSize);

        if (lastBlock) {
            break;
        } else {
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            cctx->isFirstBlock = 0;
        }
    }

    return cSize;
}

size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
                              const void* src, size_t srcSize)
{
    BYTE* op = (BYTE*)dst;
    size_t cSize = 0;
    size_t compressedBlocksSize = 0;
    size_t frameHeaderSize = 0;

    /* Transparent initialization stage, same as compressStream2() */
    DEBUGLOG(3, "ZSTD_compressSequences()");
    assert(cctx != NULL);
    FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
    /* Begin writing output, starting with frame header */
    frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
    op += frameHeaderSize;
    dstCapacity -= frameHeaderSize;
    cSize += frameHeaderSize;
    if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
        XXH64_update(&cctx->xxhState, src, srcSize);
    }
    /* cSize includes block header size and compressed sequences size */
    compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
                                                           op, dstCapacity,
                                                           inSeqs, inSeqsSize,
                                                           src, srcSize);
    FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
    cSize += compressedBlocksSize;
    dstCapacity -= compressedBlocksSize;

    if (cctx->appliedParams.fParams.checksumFlag) {
        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
        DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32((char*)dst + cSize, checksum);
        cSize += 4;
    }

    DEBUGLOG(3, "Final compressed size: %zu", cSize);
    return cSize;
}
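
/* Illustrative usage sketch (an assumption about caller-side code, not part of
 * this file): a caller holding pre-generated sequences might drive
 * ZSTD_compressSequences() roughly as follows; `mySeqs`, `nbSeqs`, `src`,
 * `srcSize`, `dst`, and `dstCapacity` are hypothetical caller-side names:
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
 *     {   size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity,
 *                                                     mySeqs, nbSeqs, src, srcSize);
 *         if (ZSTD_isError(cSize)) { ... handle error ... }
 *     }
 *     ZSTD_freeCCtx(cctx);
 */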

/*====== Finalize ======*/

/*! ZSTD_flushStream() :
 * @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
    ZSTD_inBuffer input = { NULL, 0, 0 };
    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}

size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
    ZSTD_inBuffer input = { NULL, 0, 0 };
    size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
    FORWARD_IF_ERROR(remainingToFlush, "ZSTD_compressStream2 failed");
    if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
    /* single-thread mode : attempt to calculate remaining to flush more precisely */
    {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
        size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
        return toFlush;
    }
}
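
/* Illustrative usage sketch (an assumption about caller-side code, not part of
 * this file): ZSTD_endStream() returns the number of bytes still waiting to be
 * flushed, so callers typically loop until it reports 0; `zcs` and `out` are
 * hypothetical caller-side names:
 *
 *     size_t remaining;
 *     do {
 *         remaining = ZSTD_endStream(zcs, &out);
 *         if (ZSTD_isError(remaining)) { ... handle error ... }
 *         ... consume out.dst[0 .. out.pos), then reset out.pos = 0 ...
 *     } while (remaining != 0);
 */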

/*-=====  Pre-defined compression levels  =====-*/
#include "clevels.h"

int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }
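
/* Background (summary; see the lazy match finder sources for the authoritative
 * details): "dedicated dictionary search" (DDSS) is an alternate match-finder
 * layout used when a CDict is built for the lazy strategies. Enlarging hashLog
 * by ZSTD_LAZY_DDSS_BUCKET_LOG effectively turns each hash cell into a small
 * bucket of candidate positions over the dictionary content, trading table
 * size for faster dictionary match search. */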
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
    switch (cParams.strategy) {
        case ZSTD_fast:
        case ZSTD_dfast:
            break;
        case ZSTD_greedy:
        case ZSTD_lazy:
        case ZSTD_lazy2:
            cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
            break;
        case ZSTD_btlazy2:
        case ZSTD_btopt:
        case ZSTD_btultra:
        case ZSTD_btultra2:
            break;
    }
    return cParams;
}

static int ZSTD_dedicatedDictSearch_isSupported(
        ZSTD_compressionParameters const* cParams)
{
    return (cParams->strategy >= ZSTD_greedy)
        && (cParams->strategy <= ZSTD_lazy2)
        && (cParams->hashLog > cParams->chainLog)
        && (cParams->chainLog <= 24);
}

/**
 * Reverses the adjustment applied to cparams when enabling dedicated dict
 * search. This is used to recover the params set to be used in the working
 * context. (Otherwise, those tables would also grow.)
 */
static void ZSTD_dedicatedDictSearch_revertCParams(
        ZSTD_compressionParameters* cParams)
{
    switch (cParams->strategy) {
        case ZSTD_fast:
        case ZSTD_dfast:
            break;
        case ZSTD_greedy:
        case ZSTD_lazy:
        case ZSTD_lazy2:
            cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
            if (cParams->hashLog < ZSTD_HASHLOG_MIN) {
                cParams->hashLog = ZSTD_HASHLOG_MIN;
            }
            break;
        case ZSTD_btlazy2:
        case ZSTD_btopt:
        case ZSTD_btultra:
        case ZSTD_btultra2:
            break;
    }
}

static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
    switch (mode) {
    case ZSTD_cpm_unknown:
    case ZSTD_cpm_noAttachDict:
    case ZSTD_cpm_createCDict:
        break;
    case ZSTD_cpm_attachDict:
        dictSize = 0;
        break;
    default:
        assert(0);
        break;
    }
    {   int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
        size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
        return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
    }
}

/*! ZSTD_getCParams_internal() :
 * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
 *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
 *        Use dictSize == 0 for unknown or unused.
 *  Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
    U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
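    /* Select among the 4 size-banded parameter tables:
     * tableID 0: rSize > 256 KB (or unknown without a dictionary),
     *         1: rSize <= 256 KB, 2: rSize <= 128 KB, 3: rSize <= 16 KB.
     * Each true comparison below contributes 1 to tableID. */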
    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
    int row;
    DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);

    /* row */
    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
    else if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
    else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
    else row = compressionLevel;

    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
        DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
        /* acceleration factor */
        if (compressionLevel < 0) {
            int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
            cp.targetLength = (unsigned)(-clampedCompressionLevel);
        }
        /* refine parameters based on srcSize & dictSize */
        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
    }
}

/*! ZSTD_getCParams() :
 * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
 *  Size values are optional, provide 0 if not known or unused */
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
    return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
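
/* Illustrative usage sketch (an assumption about caller-side code, not part of
 * this file): fetching level-19 parameters tuned for ~100 KB inputs, e.g. to
 * inspect or override individual fields before use with the advanced API:
 *
 *     ZSTD_compressionParameters const cParams = ZSTD_getCParams(19, 100000, 0);
 *     // cParams.windowLog, cParams.chainLog, ... are now sized for small inputs
 */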

/*! ZSTD_getParams_internal() :
 *  same idea as ZSTD_getCParams()
 * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
 *  Fields of `ZSTD_frameParameters` are set to default values */
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
    ZSTD_parameters params;
    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
    ZSTD_memset(&params, 0, sizeof(params));
    params.cParams = cParams;
    params.fParams.contentSizeFlag = 1;
    return params;
}

/*! ZSTD_getParams() :
 *  same idea as ZSTD_getCParams()
 * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
 *  Fields of `ZSTD_frameParameters` are set to default values */
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
    return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}