zstd_opt.c

/*
 * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "hist.h"
#include "zstd_opt.h"


#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_MAX_PRICE     (1<<30)

#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
/*-*************************************
*  Price functions for optimal parser
***************************************/

#if 0    /* approximation at bit level (for tests) */
#  define BITCOST_ACCURACY 0
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat))
#elif 0  /* fractional bit accuracy (for tests) */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
#else    /* opt==approx, ultra==accurate */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
#endif
MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
{
    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
}

MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
{
    U32 const stat = rawStat + 1;
    U32 const hb = ZSTD_highbit32(stat);
    U32 const BWeight = hb * BITCOST_MULTIPLIER;
    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
    U32 const weight = BWeight + FWeight;
    assert(hb + BITCOST_ACCURACY < 31);
    return weight;
}
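
/* Note on how these weights are used :
 * ZSTD_fracWeight(rawStat) is roughly (log2(rawStat+1) + 1), expressed in 1/BITCOST_MULTIPLIER bit units :
 * the high bit position gives the integer part, and FWeight adds a linear fractional correction.
 * Example : rawStat=2 => stat=3, hb=1, BWeight=256, FWeight=384, weight=640 (~2.5 units of 1/256 bit).
 * Symbol prices below are then assembled as WEIGHT(totalSum) - WEIGHT(symbolFreq),
 * which approximates log2(totalSum/symbolFreq), i.e. the Shannon cost of the symbol, in the same units. */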

#if (DEBUGLEVEL>=2)
/* debugging function,
 * @return price in bytes as fractional value
 * for debug messages only */
MEM_STATIC double ZSTD_fCost(U32 price)
{
    return (double)price / (BITCOST_MULTIPLIER*8);
}
#endif

static int ZSTD_compressedLiterals(optState_t const* const optPtr)
{
    return optPtr->literalCompressionMode != ZSTD_ps_disable;
}

static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
{
    if (ZSTD_compressedLiterals(optPtr))
        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
}

static U32 sum_u32(const unsigned table[], size_t nbElts)
{
    size_t n;
    U32 total = 0;
    for (n=0; n<nbElts; n++) {
        total += table[n];
    }
    return total;
}

static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift)
{
    U32 s, sum=0;
    DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift);
    assert(shift < 30);
    for (s=0; s<lastEltIndex+1; s++) {
        table[s] = 1 + (table[s] >> shift);
        sum += table[s];
    }
    return sum;
}

/* ZSTD_scaleStats() :
 * reduce all elements in table if sum is too large.
 * @return the resulting sum of elements */
static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
{
    U32 const prevsum = sum_u32(table, lastEltIndex+1);
    U32 const factor = prevsum >> logTarget;
    DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
    assert(logTarget < 30);
    if (factor <= 1) return prevsum;
    return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor));
}
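
/* Example : with logTarget=11, a table summing to ~16K gives factor=8, hence shift=3,
 * so each element is rescaled to 1 + (elt >> 3), bringing the sum back near the 2K target
 * while keeping every frequency >= 1 (so no symbol ever gets an infinite price). */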

/* ZSTD_rescaleFreqs() :
 * if first block (detected by optPtr->litLengthSum == 0) : init statistics
 *    take hints from dictionary if there is one
 *    and init from zero if there is none,
 *    using src for literals stats, and baseline stats for sequence symbols
 * otherwise downscale existing stats, to be used as seed for next block.
 */
static void
ZSTD_rescaleFreqs(optState_t* const optPtr,
            const BYTE* const src, size_t const srcSize,
                  int const optLevel)
{
    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
    optPtr->priceType = zop_dynamic;

    if (optPtr->litLengthSum == 0) {  /* first block : init */
        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
            optPtr->priceType = zop_predef;
        }

        assert(optPtr->symbolCosts != NULL);
        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
            /* huffman table presumed generated by dictionary */
            optPtr->priceType = zop_dynamic;
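
            /* Note : in the blocks below, a symbol that the dictionary's entropy table prices at b bits
             * is given pseudo-frequency 2^(scaleLog-b), so that the dynamic price model
             * (WEIGHT(sum) - WEIGHT(freq)) reproduces roughly b bits for that symbol. */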

            if (compressedLiterals) {
                unsigned lit;
                assert(optPtr->litFreq != NULL);
                optPtr->litSum = 0;
                for (lit=0; lit<=MaxLit; lit++) {
                    U32 const scaleLog = 11;   /* scale to 2K */
                    U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit);
                    assert(bitCost <= scaleLog);
                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litSum += optPtr->litFreq[lit];
            }   }

            {   unsigned ll;
                FSE_CState_t llstate;
                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
                optPtr->litLengthSum = 0;
                for (ll=0; ll<=MaxLL; ll++) {
                    U32 const scaleLog = 10;   /* scale to 1K */
                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
                    assert(bitCost < scaleLog);
                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
            }   }

            {   unsigned ml;
                FSE_CState_t mlstate;
                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
                optPtr->matchLengthSum = 0;
                for (ml=0; ml<=MaxML; ml++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
                    assert(bitCost < scaleLog);
                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
            }   }

            {   unsigned of;
                FSE_CState_t ofstate;
                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
                optPtr->offCodeSum = 0;
                for (of=0; of<=MaxOff; of++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
                    assert(bitCost < scaleLog);
                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
            }   }

        } else {  /* not a dictionary */

            assert(optPtr->litFreq != NULL);
            if (compressedLiterals) {
                unsigned lit = MaxLit;
                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
                optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8);
            }

            {   unsigned const baseLLfreqs[MaxLL+1] = {
                    4, 2, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1
                };
                ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs));
                optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
            }

            {   unsigned ml;
                for (ml=0; ml<=MaxML; ml++)
                    optPtr->matchLengthFreq[ml] = 1;
            }
            optPtr->matchLengthSum = MaxML+1;

            {   unsigned const baseOFCfreqs[MaxOff+1] = {
                    6, 2, 1, 1, 2, 3, 4, 4,
                    4, 3, 2, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1
                };
                ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs));
                optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
            }

        }

    } else {   /* new block : re-use previous statistics, scaled down */

        if (compressedLiterals)
            optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
        optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
        optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
        optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
    }

    ZSTD_setBasePrices(optPtr, optLevel);
}

/* ZSTD_rawLiteralsCost() :
 * price of literals (only) in specified segment (whose length can be 0).
 * does not include price of literalLength symbol */
static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
                                const optState_t* const optPtr,
                                int optLevel)
{
    if (litLength == 0) return 0;

    if (!ZSTD_compressedLiterals(optPtr))
        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bits per literal. */

    if (optPtr->priceType == zop_predef)
        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistic used */

    /* dynamic statistics */
    {   U32 price = litLength * optPtr->litSumBasePrice;
        U32 u;
        for (u=0; u < litLength; u++) {
            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
        }
        return price;
    }
}

/* ZSTD_litLengthPrice() :
 * cost of literalLength symbol */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
    assert(litLength <= ZSTD_BLOCKSIZE_MAX);

    if (optPtr->priceType == zop_predef)
        return WEIGHT(litLength, optLevel);
    /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
     * because it isn't representable in the zstd format. So instead just
     * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block
     * would be all literals.
     */
    if (litLength == ZSTD_BLOCKSIZE_MAX)
        return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);

    /* dynamic statistics */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
             + optPtr->litLengthSumBasePrice
             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
    }
}

/* ZSTD_getMatchPrice() :
 * Provides the cost of the match part (offset + matchLength) of a sequence.
 * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
 * @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2
 * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
 */
FORCE_INLINE_TEMPLATE U32
ZSTD_getMatchPrice(U32 const offcode,
                   U32 const matchLength,
             const optState_t* const optPtr,
                   int const optLevel)
{
    U32 price;
    U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode));
    U32 const mlBase = matchLength - MINMATCH;
    assert(matchLength >= MINMATCH);

    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);

    /* dynamic statistics */
    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
    if ((optLevel<2) /*static*/ && offCode >= 20)
        price += (offCode-19)*2 * BITCOST_MULTIPLIER;   /* handicap for long distance offsets, favor decompression speed */

    /* match Length */
    {   U32 const mlCode = ZSTD_MLcode(mlBase);
        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
    }

    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly, to favor fewer sequences -> faster decompression speed */

    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
    return price;
}
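
/* Note : in the parser below, the total price of extending a path with a sequence is assembled as
 * opt[cur].price (literals already accounted for) + ZSTD_litLengthPrice(0, ...) + ZSTD_getMatchPrice(offset, mlen, ...),
 * while literal extensions use ZSTD_rawLiteralsCost() plus the litLength price delta. */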

/* ZSTD_updateStats() :
 * assumption : literals + litLength <= iend */
static void ZSTD_updateStats(optState_t* const optPtr,
                             U32 litLength, const BYTE* literals,
                             U32 offsetCode, U32 matchLength)
{
    /* literals */
    if (ZSTD_compressedLiterals(optPtr)) {
        U32 u;
        for (u=0; u < litLength; u++)
            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
    }

    /* literal Length */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        optPtr->litLengthFreq[llCode]++;
        optPtr->litLengthSum++;
    }

    /* offset code : expected to follow storeSeq() numeric representation */
    {   U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode));
        assert(offCode <= MaxOff);
        optPtr->offCodeFreq[offCode]++;
        optPtr->offCodeSum++;
    }

    /* match Length */
    {   U32 const mlBase = matchLength - MINMATCH;
        U32 const mlCode = ZSTD_MLcode(mlBase);
        optPtr->matchLengthFreq[mlCode]++;
        optPtr->matchLengthSum++;
    }
}

/* ZSTD_readMINMATCH() :
 * function safe only for comparisons
 * assumption : memPtr must be at least 4 bytes before end of buffer */
MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
{
    switch (length)
    {
    default :
    case 4 : return MEM_read32(memPtr);
    case 3 : if (MEM_isLittleEndian())
                return MEM_read32(memPtr)<<8;
             else
                return MEM_read32(memPtr)>>8;
    }
}
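
/* Example : for length==3, the shift discards the 4th byte that MEM_read32() over-read :
 * on little-endian, <<8 pushes byte 3 out of the 32-bit value; on big-endian, >>8 drops it.
 * Two 3-byte sequences therefore compare equal iff their first 3 bytes match,
 * which is all the repcode / hash3 checks below need. */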

/* Update hashTable3 up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
                                              U32* nextToUpdate3,
                                              const BYTE* const ip)
{
    U32* const hashTable3 = ms->hashTable3;
    U32 const hashLog3 = ms->hashLog3;
    const BYTE* const base = ms->window.base;
    U32 idx = *nextToUpdate3;
    U32 const target = (U32)(ip - base);
    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
    assert(hashLog3 > 0);

    while(idx < target) {
        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
        idx++;
    }

    *nextToUpdate3 = target;
    return hashTable3[hash3];
}

/*-*************************************
*  Binary Tree search
***************************************/

/** ZSTD_insertBt1() : add one or multiple positions to tree.
 *  @param ip assumed <= iend-8 .
 *  @param target The target of ZSTD_updateTree_internal() - we are filling to this position
 *  @return : nb of positions added */
static U32 ZSTD_insertBt1(
                const ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                U32 const target,
                U32 const mls, const int extDict)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32 matchIndex = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    const U32 curr = (U32)(ip-base);
    const U32 btLow = btMask >= curr ? 0 : curr - btMask;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 dummy32;   /* to be nullified at the end */
    /* windowLow is based on target because
     * we only need positions that will be in the window at the end of the tree update.
     */
    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
    U32 matchEndIdx = curr+8+1;
    size_t bestLength = 8;
    U32 nbCompares = 1U << cParams->searchLog;
#ifdef ZSTD_C_PREDICT
    U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
    U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
    predictedSmall += (predictedSmall>0);
    predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */

    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);

    assert(curr <= target);
    assert(ip <= iend-8);   /* required for h calculation */
    hashTable[h] = curr;   /* Update Hash Table */

    assert(windowLow > 0);
    for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < curr);

#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
        if (matchIndex == predictedSmall) {
            /* no need to check length, result known */
            *smallerPtr = matchIndex;
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
            continue;
        }
        if (matchIndex == predictedLarge) {
            *largerPtr = matchIndex;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
            continue;
        }
#endif

        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
            match = base + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        if (matchLength > bestLength) {
            bestLength = matchLength;
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
        }

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop, to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
    {   U32 positions = 0;
        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
        assert(matchEndIdx > curr + 8);
        return MAX(positions, matchEndIdx - (curr + 8));
    }
}
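
/* Note on the tree layout : bt[] is a rolling buffer holding two links per position
 * (at indices 2*(pos&btMask) and 2*(pos&btMask)+1) : one toward suffixes that sort smaller,
 * one toward suffixes that sort larger, forming a binary search tree over recent positions.
 * commonLengthSmaller / commonLengthLarger track the prefix length already known to be shared
 * along each branch, so byte comparisons can resume at ip+matchLength instead of restarting from 0. */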

FORCE_INLINE_TEMPLATE
void ZSTD_updateTree_internal(
                ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                const U32 mls, const ZSTD_dictMode_e dictMode)
{
    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;
    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
                idx, target, dictMode);

    while(idx < target) {
        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);
        assert(idx < (U32)(idx + forward));
        idx += forward;
    }
    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
    ms->nextToUpdate = target;
}

void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}

FORCE_INLINE_TEMPLATE
U32 ZSTD_insertBtAndGetAllMatches (
                ZSTD_match_t* matches,  /* store result (found matches) in this table (presumed large enough) */
                ZSTD_matchState_t* ms,
                U32* nextToUpdate3,
                const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
                const U32 rep[ZSTD_REP_NUM],
                U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
                const U32 lengthToBeat,
                U32 const mls /* template */)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
    const BYTE* const base = ms->window.base;
    U32 const curr = (U32)(ip-base);
    U32 const hashLog = cParams->hashLog;
    U32 const minMatch = (mls==3) ? 3 : 4;
    U32* const hashTable = ms->hashTable;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32 matchIndex  = hashTable[h];
    U32* const bt   = ms->chainTable;
    U32 const btLog = cParams->chainLog - 1;
    U32 const btMask= (1U << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const dictBase = ms->window.dictBase;
    U32 const dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
    U32 const matchLow = windowLow ? windowLow : 1;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = bt + 2*(curr&btMask) + 1;
    U32 matchEndIdx = curr+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
    U32 dummy32;   /* to be nullified at the end */
    U32 mnum = 0;
    U32 nbCompares = 1U << cParams->searchLog;

    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
    const ZSTD_compressionParameters* const dmsCParams =
                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;

    size_t bestLength = lengthToBeat-1;
    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);

    /* check repCode */
    assert(ll0 <= 1);   /* necessarily 1 or 0 */
    {   U32 const lastR = ZSTD_REP_NUM + ll0;
        U32 repCode;
        for (repCode = ll0; repCode < lastR; repCode++) {
            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            U32 const repIndex = curr - repOffset;
            U32 repLen = 0;
            assert(curr >= dictLimit);
            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) {  /* equivalent to `curr > repIndex >= dictLimit` */
                /* We must validate the repcode offset because when we're using a dictionary the
                 * valid offset range shrinks when the dictionary goes out of bounds.
                 */
                if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
                }
            } else {  /* repIndex < dictLimit || repIndex >= curr */
                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
                                             dmsBase + repIndex - dmsIndexDelta :
                                             dictBase + repIndex;
                assert(curr >= windowLow);
                if ( dictMode == ZSTD_extDict
                  && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow)  /* equivalent to `curr > repIndex >= windowLow` */
                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
                }
                if (dictMode == ZSTD_dictMatchState
                  && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `curr > repIndex >= dmsLowLimit` */
                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
            }   }
            /* save longer solution */
            if (repLen > bestLength) {
                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
                            repCode, ll0, repOffset, repLen);
                bestLength = repLen;
                matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1);  /* expect value between 1 and 3 */
                matches[mnum].len = (U32)repLen;
                mnum++;
                if ( (repLen > sufficient_len)
                   | (ip+repLen == iLimit) ) {  /* best possible */
                    return mnum;
    }   }   }   }

    /* HC3 match finder */
    if ((mls == 3) /*static*/ && (bestLength < mls)) {
        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
        if ((matchIndex3 >= matchLow)
          & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
            size_t mlen;
            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
                const BYTE* const match = base + matchIndex3;
                mlen = ZSTD_count(ip, match, iLimit);
            } else {
                const BYTE* const match = dictBase + matchIndex3;
                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
            }

            /* save best solution */
            if (mlen >= mls /* == 3 > bestLength */) {
                DEBUGLOG(8, "found small match with hlog3, of length %u",
                            (U32)mlen);
                bestLength = mlen;
                assert(curr > matchIndex3);
                assert(mnum==0);  /* no prior solution */
                matches[0].off = STORE_OFFSET(curr - matchIndex3);
                matches[0].len = (U32)mlen;
                mnum = 1;
                if ( (mlen > sufficient_len) |
                     (ip+mlen == iLimit) ) {  /* best possible length */
                    ms->nextToUpdate = curr+1;  /* skip insertion */
                    return 1;
        }   }   }
        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
    }  /* if (mls == 3) */

    hashTable[h] = curr;   /* Update Hash Table */

    for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        const BYTE* match;
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(curr > matchIndex);

        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
            match = base + matchIndex;
            if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
        } else {
            match = dictBase + matchIndex;
            assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* prepare for match[matchLength] read */
        }

        if (matchLength > bestLength) {
            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
                    (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
            assert(matchEndIdx > matchIndex);
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
            bestLength = matchLength;
            matches[mnum].off = STORE_OFFSET(curr - matchIndex);
            matches[mnum].len = (U32)matchLength;
            mnum++;
            if ( (matchLength > ZSTD_OPT_NUM)
               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
        }   }

        if (match[matchLength] < ip[matchLength]) {
            /* match smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
        } else {
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;

    assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
    if (dictMode == ZSTD_dictMatchState && nbCompares) {
        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
        U32 dictMatchIndex = dms->hashTable[dmsH];
        const U32* const dmsBt = dms->chainTable;
        commonLengthSmaller = commonLengthLarger = 0;
        for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match = dmsBase + dictMatchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
            if (dictMatchIndex+matchLength >= dmsHighLimit)
                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */

            if (matchLength > bestLength) {
                matchIndex = dictMatchIndex + dmsIndexDelta;
                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
                        (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                bestLength = matchLength;
                matches[mnum].off = STORE_OFFSET(curr - matchIndex);
                matches[mnum].len = (U32)matchLength;
                mnum++;
                if ( (matchLength > ZSTD_OPT_NUM)
                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
            }   }

            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
            if (match[matchLength] < ip[matchLength]) {
                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
                dictMatchIndex = nextPtr[1];          /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                commonLengthLarger = matchLength;
                dictMatchIndex = nextPtr[0];
    }   }   }  /* if (dictMode == ZSTD_dictMatchState) */

    assert(matchEndIdx > curr+8);
    ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
    return mnum;
}

typedef U32 (*ZSTD_getAllMatchesFn)(
    ZSTD_match_t*,
    ZSTD_matchState_t*,
    U32*,
    const BYTE*,
    const BYTE*,
    const U32 rep[ZSTD_REP_NUM],
    U32 const ll0,
    U32 const lengthToBeat);

FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
        ZSTD_match_t* matches,
        ZSTD_matchState_t* ms,
        U32* nextToUpdate3,
        const BYTE* ip,
        const BYTE* const iHighLimit,
        const U32 rep[ZSTD_REP_NUM],
        U32 const ll0,
        U32 const lengthToBeat,
        const ZSTD_dictMode_e dictMode,
        const U32 mls)
{
    assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
    DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls);
    if (ip < ms->window.base + ms->nextToUpdate)
        return 0;   /* skipped area */
    ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode);
    return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls);
}

#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls

#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls)            \
    static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)(      \
            ZSTD_match_t* matches,                             \
            ZSTD_matchState_t* ms,                             \
            U32* nextToUpdate3,                                \
            const BYTE* ip,                                    \
            const BYTE* const iHighLimit,                      \
            const U32 rep[ZSTD_REP_NUM],                       \
            U32 const ll0,                                     \
            U32 const lengthToBeat)                            \
    {                                                          \
        return ZSTD_btGetAllMatches_internal(                  \
                matches, ms, nextToUpdate3, ip, iHighLimit,    \
                rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
    }

#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode)  \
    GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3)  \
    GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4)  \
    GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5)  \
    GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)

GEN_ZSTD_BT_GET_ALL_MATCHES(noDict)
GEN_ZSTD_BT_GET_ALL_MATCHES(extDict)
GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)

#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode)  \
    {                                            \
        ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \
        ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \
        ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \
        ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6)  \
    }

static ZSTD_getAllMatchesFn
ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
{
    ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
        ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
        ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict),
        ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState)
    };
    U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6);
    assert((U32)dictMode < 3);
    assert(mls - 3 < 4);
    return getAllMatchesFns[(int)dictMode][mls - 3];
}
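
/* Example : ZSTD_selectBtGetAllMatches(ms, ZSTD_noDict) with cParams.minMatch==3
 * returns ZSTD_btGetAllMatches_noDict_3, i.e. the ZSTD_insertBtAndGetAllMatches() template
 * specialized for (dictMode=ZSTD_noDict, mls=3) by the macros above. */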

/*************************
* LDM helper functions *
*************************/

/* Struct containing info needed to make decision about ldm inclusion */
typedef struct {
    rawSeqStore_t seqStore;   /* External match candidates store for this block */
    U32 startPosInBlock;      /* Start position of the current match candidate */
    U32 endPosInBlock;        /* End position of the current match candidate */
    U32 offset;               /* Offset of the match candidate */
} ZSTD_optLdm_t;

/* ZSTD_optLdm_skipRawSeqStoreBytes():
 * Moves forward in @rawSeqStore by @nbBytes,
 * which will update the fields 'pos' and 'posInSequence'.
 */
static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes)
{
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}

/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
 * Calculates the beginning and end of the next match in the current block.
 * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
 */
static void
ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
                                       U32 blockBytesRemaining)
{
    rawSeq currSeq;
    U32 currBlockEndPos;
    U32 literalsBytesRemaining;
    U32 matchBytesRemaining;

    /* Setting match end position to MAX to ensure we never use an LDM during this block */
    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
        optLdm->startPosInBlock = UINT_MAX;
        optLdm->endPosInBlock = UINT_MAX;
        return;
    }
    /* Calculate appropriate bytes left in matchLength and litLength
     * after adjusting based on ldmSeqStore->posInSequence */
    currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
    assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
    currBlockEndPos = currPosInBlock + blockBytesRemaining;
    literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
            currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
            0;
    matchBytesRemaining = (literalsBytesRemaining == 0) ?
            currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
            currSeq.matchLength;

    /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
    if (literalsBytesRemaining >= blockBytesRemaining) {
        optLdm->startPosInBlock = UINT_MAX;
        optLdm->endPosInBlock = UINT_MAX;
        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
        return;
    }

    /* Matches may be < MINMATCH by this process. In that case, we will reject them
       when we are deciding whether or not to add the ldm */
    optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
    optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
    optLdm->offset = currSeq.offset;

    if (optLdm->endPosInBlock > currBlockEndPos) {
        /* Match ends after the block ends, we can't use the whole match */
        optLdm->endPosInBlock = currBlockEndPos;
        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
    } else {
        /* Consume nb of bytes equal to size of sequence left */
        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
    }
}

/* ZSTD_optLdm_maybeAddMatch():
 * Adds a match if it's long enough,
 * based on its 'matchStartPosInBlock' and 'matchEndPosInBlock',
 * into 'matches'. Maintains the correct ordering of 'matches'.
 */
static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
                                      const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
{
    U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
    /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
    U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;

    /* Ensure that current block position is not outside of the match */
    if (currPosInBlock < optLdm->startPosInBlock
      || currPosInBlock >= optLdm->endPosInBlock
      || candidateMatchLength < MINMATCH) {
        return;
    }

    if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
        U32 const candidateOffCode = STORE_OFFSET(optLdm->offset);
        DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
                 candidateOffCode, candidateMatchLength, currPosInBlock);
        matches[*nbMatches].len = candidateMatchLength;
        matches[*nbMatches].off = candidateOffCode;
        (*nbMatches)++;
    }
}

/* ZSTD_optLdm_processMatchCandidate():
 * Wrapper function to update ldm seq store and call ldm functions as necessary.
 */
static void
ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
                                  ZSTD_match_t* matches, U32* nbMatches,
                                  U32 currPosInBlock, U32 remainingBytes)
{
    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
        return;
    }

    if (currPosInBlock >= optLdm->endPosInBlock) {
        if (currPosInBlock > optLdm->endPosInBlock) {
            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
             * at the end of a match from the ldm seq store, and will often be some bytes
             * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
             */
            U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock;
            ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
        }
        ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
    }
    ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
}

/*-*******************************
*  Optimal parser
*********************************/

static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
{
    return sol.litlen + sol.mlen;
}

#if 0 /* debug */

static void
listStats(const U32* table, int lastEltID)
{
    int const nbElts = lastEltID + 1;
    int enb;
    for (enb=0; enb < nbElts; enb++) {
        (void)table;
        /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */
        RAWLOG(2, "%4i,", table[enb]);
    }
    RAWLOG(2, " \n");
}

#endif
  923. FORCE_INLINE_TEMPLATE size_t
  924. ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
  925. seqStore_t* seqStore,
  926. U32 rep[ZSTD_REP_NUM],
  927. const void* src, size_t srcSize,
  928. const int optLevel,
  929. const ZSTD_dictMode_e dictMode)
  930. {
  931. optState_t* const optStatePtr = &ms->opt;
  932. const BYTE* const istart = (const BYTE*)src;
  933. const BYTE* ip = istart;
  934. const BYTE* anchor = istart;
  935. const BYTE* const iend = istart + srcSize;
  936. const BYTE* const ilimit = iend - 8;
  937. const BYTE* const base = ms->window.base;
  938. const BYTE* const prefixStart = base + ms->window.dictLimit;
  939. const ZSTD_compressionParameters* const cParams = &ms->cParams;
  940. ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
  941. U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
  942. U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
  943. U32 nextToUpdate3 = ms->nextToUpdate;
  944. ZSTD_optimal_t* const opt = optStatePtr->priceTable;
  945. ZSTD_match_t* const matches = optStatePtr->matchTable;
  946. ZSTD_optimal_t lastSequence;
  947. ZSTD_optLdm_t optLdm;
  948. optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
  949. optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
  950. ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
  951. /* init */
  952. DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
  953. (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
  954. assert(optLevel <= 2);
  955. ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
  956. ip += (ip==prefixStart);
  957. /* Match Loop */
  958. while (ip < ilimit) {
  959. U32 cur, last_pos = 0;
  960. /* find first match */
  961. { U32 const litlen = (U32)(ip - anchor);
  962. U32 const ll0 = !litlen;
  963. U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
  964. ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
  965. (U32)(ip-istart), (U32)(iend - ip));
  966. if (!nbMatches) { ip++; continue; }
  967. /* initialize opt[0] */
  968. { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
  969. opt[0].mlen = 0; /* means is_a_literal */
  970. opt[0].litlen = litlen;
  971. /* We don't need to include the actual price of the literals because
  972. * it is static for the duration of the forward pass, and is included
  973. * in every price. We include the literal length to avoid negative
  974. * prices when we subtract the previous literal length.
  975. */
  976. opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
  977. /* large match -> immediate encoding */
  978. { U32 const maxML = matches[nbMatches-1].len;
  979. U32 const maxOffcode = matches[nbMatches-1].off;
  980. DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
  981. nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart));
  982. if (maxML > sufficient_len) {
  983. lastSequence.litlen = litlen;
  984. lastSequence.mlen = maxML;
  985. lastSequence.off = maxOffcode;
  986. DEBUGLOG(6, "large match (%u>%u), immediate encoding",
  987. maxML, sufficient_len);
  988. cur = 0;
  989. last_pos = ZSTD_totalLen(lastSequence);
  990. goto _shortestPath;
  991. } }
  992. /* set prices for first matches starting position == 0 */
  993. assert(opt[0].price >= 0);
  994. { U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
  995. U32 pos;
  996. U32 matchNb;
  997. for (pos = 1; pos < minMatch; pos++) {
  998. opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
  999. }
  1000. for (matchNb = 0; matchNb < nbMatches; matchNb++) {
  1001. U32 const offcode = matches[matchNb].off;
  1002. U32 const end = matches[matchNb].len;
  1003. for ( ; pos <= end ; pos++ ) {
  1004. U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel);
  1005. U32 const sequencePrice = literalsPrice + matchPrice;
  1006. DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
  1007. pos, ZSTD_fCost(sequencePrice));
  1008. opt[pos].mlen = pos;
  1009. opt[pos].off = offcode;
  1010. opt[pos].litlen = litlen;
  1011. opt[pos].price = (int)sequencePrice;
  1012. } }
  1013. last_pos = pos-1;
  1014. }
  1015. }
  1016. /* check further positions */
  1017. for (cur = 1; cur <= last_pos; cur++) {
  1018. const BYTE* const inr = ip + cur;
  1019. assert(cur < ZSTD_OPT_NUM);
  1020. DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
  1021. /* Fix current position with one literal if cheaper */
  1022. { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
  1023. int const price = opt[cur-1].price
  1024. + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
  1025. + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
  1026. - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
  1027. assert(price < 1000000000); /* overflow check */
  1028. if (price <= opt[cur].price) {
  1029. DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
  1030. inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
  1031. opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
  1032. opt[cur].mlen = 0;
  1033. opt[cur].off = 0;
  1034. opt[cur].litlen = litlen;
  1035. opt[cur].price = price;
  1036. } else {
  1037. DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
  1038. inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
  1039. opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
  1040. }
  1041. }
  1042. /* Set the repcodes of the current position. We must do it here
  1043. * because we rely on the repcodes of the 2nd to last sequence being
  1044. * correct to set the next chunks repcodes during the backward
  1045. * traversal.
  1046. */
            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
            assert(cur >= opt[cur].mlen);
            if (opt[cur].mlen != 0) {
                U32 const prev = cur - opt[cur].mlen;
                repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
            } else {
                ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
            }

            /* last match must start at a minimum distance of 8 from oend */
            if (inr > ilimit) continue;

            if (cur == last_pos) break;

            if ( (optLevel==0) /*static_test*/
              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
            }

            assert(opt[cur].price >= 0);
            {   U32 const ll0 = (opt[cur].mlen != 0);
                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
                U32 const previousPrice = (U32)opt[cur].price;
                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
                U32 matchNb;

                ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
                                                  (U32)(inr-istart), (U32)(iend-inr));

                if (!nbMatches) {
                    DEBUGLOG(7, "rPos:%u : no match found", cur);
                    continue;
                }

                {   U32 const maxML = matches[nbMatches-1].len;
                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
                                inr-istart, cur, nbMatches, maxML);
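
                    /* A match longer than sufficient_len, or one that would run
                     * past the end of the opt[] table, ends the forward pass :
                     * it becomes the final sequence of this series and the parse
                     * jumps straight to the backward traversal. */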
                    if ( (maxML > sufficient_len)
                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
                        lastSequence.mlen = maxML;
                        lastSequence.off = matches[nbMatches-1].off;
                        lastSequence.litlen = litlen;
                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
                        last_pos = cur + ZSTD_totalLen(lastSequence);
                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
                        goto _shortestPath;
                }   }

                /* set prices using matches found at position == cur */
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    U32 const lastML = matches[matchNb].len;
                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
                    U32 mlen;

                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
                                matchNb, matches[matchNb].off, lastML, litlen);

                    for (mlen = lastML ; mlen >= startML ; mlen--) {  /* scan downward */
                        U32 const pos = cur + mlen;
                        int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);

                        if ((pos > last_pos) || (price < opt[pos].price)) {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
                            opt[pos].mlen = mlen;
                            opt[pos].off = offset;
                            opt[pos].litlen = litlen;
                            opt[pos].price = price;
                        } else {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
                        }
            }   }   }
        }   /* for (cur = 1; cur <= last_pos; cur++) */
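
        /* No early exit happened: the cheapest path ends exactly at last_pos.
         * Rewind cur to the start of that final sequence, so that opt[cur].rep
         * holds the repcode history valid at its beginning. */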
        lastSequence = opt[last_pos];
        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
        assert(cur < ZSTD_OPT_NUM);  /* control overflow */

_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
        assert(opt[0].mlen == 0);

        /* Set the next chunk's repcodes based on the repcodes of the beginning
         * of the last match, and the last sequence. This avoids us having to
         * update them while traversing the sequences.
         */
        if (lastSequence.mlen != 0) {
            repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
            ZSTD_memcpy(rep, &reps, sizeof(reps));
        } else {
            ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
        }
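
        /* Backward traversal: each opt[] entry records the sequence that ends at
         * its position, so stepping back by ZSTD_totalLen() hops from the end of
         * the parse to position 0. Selected sequences are staged into
         * opt[storeStart..storeEnd], then emitted below in forward order. */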
        {   U32 const storeEnd = cur + 1;
            U32 storeStart = storeEnd;
            U32 seqPos = cur;

            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
                        last_pos, cur); (void)last_pos;
            assert(storeEnd < ZSTD_OPT_NUM);
            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
            opt[storeEnd] = lastSequence;
            while (seqPos > 0) {
                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
                storeStart--;
                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
                opt[storeStart] = opt[seqPos];
                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
            }

            /* save sequences */
            DEBUGLOG(6, "sending selected sequences into seqStore")
            {   U32 storePos;
                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
                    U32 const llen = opt[storePos].litlen;
                    U32 const mlen = opt[storePos].mlen;
                    U32 const offCode = opt[storePos].off;
                    U32 const advance = llen + mlen;
                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
                                anchor - istart, (unsigned)llen, (unsigned)mlen);

                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
                        assert(storePos == storeEnd);  /* must be last sequence */
                        ip = anchor + llen;  /* last "sequence" is a bunch of literals => don't progress anchor */
                        continue;   /* will finish */
                    }

                    assert(anchor + llen <= iend);
                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen);
                    anchor += advance;
                    ip = anchor;
            }   }
            ZSTD_setBasePrices(optStatePtr, optLevel);
        }
    }   /* while (ip < ilimit) */

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


static size_t ZSTD_compressBlock_opt0(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
}

static size_t ZSTD_compressBlock_opt2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
}

size_t ZSTD_compressBlock_btopt(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}

/* ZSTD_initStats_ultra():
 * make a first compression pass, just to seed stats with more accurate starting values.
 * only works on first block, with no dictionary and no ldm.
 * this function cannot error, hence its contract must be respected.
 */
static void
ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
                     seqStore_t* seqStore,
                     U32 rep[ZSTD_REP_NUM],
                     const void* src, size_t srcSize)
{
    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));

    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
    assert(ms->opt.litLengthSum == 0);    /* first block */
    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);   /* no prefix (note: intentional overflow, defined as 2-complement) */

    ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict);   /* generate stats into ms->opt */

    /* invalidate first scan from history */
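    /* Shifting base backward while raising dictLimit/lowLimit by srcSize pushes
     * every index inserted during this stats pass below the valid window, so the
     * real compression pass starts from effectively empty tables while keeping
     * the statistics seeded into ms->opt. */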
    ZSTD_resetSeqStore(seqStore);
    ms->window.base -= srcSize;
    ms->window.dictLimit += (U32)srcSize;
    ms->window.lowLimit = ms->window.dictLimit;
    ms->nextToUpdate = ms->window.dictLimit;
}

size_t ZSTD_compressBlock_btultra(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btultra2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    U32 const curr = (U32)((const BYTE*)src - ms->window.base);
    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);

    /* 2-pass strategy:
     * this strategy makes a first pass over the first block to collect statistics,
     * which are then used to seed the statistics of the real compression pass.
     * After the first pass, the function forgets everything and restarts the block from scratch.
     * Consequently, this can only work if no data has been previously loaded into the tables,
     * i.e. no dictionary, no prefix, no ldm preprocessing.
     * The compression ratio gain is generally small (~0.5% on the first block),
     * and the cost is 2x cpu time on the first block. */
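    /* The gate below echoes the preconditions asserted in ZSTD_initStats_ultra():
     * first block, no ldm output yet, no dictionary, nothing loaded or skipped
     * before the current position, plus a minimum size below which a dedicated
     * stats pass is not considered worthwhile. */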
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    if ( (ms->opt.litLengthSum==0)                          /* first block */
      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
      && (ms->window.dictLimit == ms->window.lowLimit)      /* no dictionary */
      && (curr == ms->window.dictLimit)                     /* start of frame, nothing already loaded nor skipped */
      && (srcSize > ZSTD_PREDEF_THRESHOLD)
      ) {
        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
    }

    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btopt_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btultra_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btopt_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}

size_t ZSTD_compressBlock_btultra_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}

/* note : no btultra2 variant for extDict nor dictMatchState,
 * because btultra2 is not meant to work with dictionaries
 * and only applies to the first block (with no prefix) */