zstd_ldm.c
/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_ldm.h"

#include "../common/debug.h"
#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */

#define LDM_BUCKET_SIZE_LOG 3
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7
#define LDM_HASH_CHAR_OFFSET 10

void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams)
{
    params->windowLog = cParams->windowLog;
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
    if (params->hashLog == 0) {
        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
    }
    if (params->hashRateLog == 0) {
        params->hashRateLog = params->windowLog < params->hashLog
                            ? 0
                            : params->windowLog - params->hashLog;
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}

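/* Worked example for the defaults above (illustrative): with windowLog == 27
 * and every other LDM field left at 0, the parameters resolve to
 *   bucketSizeLog  = 3   (8 entries per bucket)
 *   minMatchLength = 64
 *   hashLog        = 27 - LDM_HASH_RLOG = 20
 *   hashRateLog    = 27 - 20 = 7
 */
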
size_t ZSTD_ldm_getTableSize(ldmParams_t params)
{
    size_t const ldmHSize = ((size_t)1) << params.hashLog;
    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
    return params.enableLdm ? totalSize : 0;
}

size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
{
    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
}

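/* Sizing example for ZSTD_ldm_getTableSize() (illustrative, assuming
 * sizeof(ldmEntry_t) == 8, i.e. a U32 offset plus a U32 checksum):
 * hashLog == 20 and bucketSizeLog == 3 give a 2^20-entry hash table (~8 MB)
 * plus 2^17 one-byte bucket offsets (~128 KB), before the alignment padding
 * applied by ZSTD_cwksp_alloc_size(). */
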
/** ZSTD_ldm_getSmallHash() :
 *  numBits should be <= 32
 *  If numBits==0, returns 0.
 *  @return : the most significant numBits of value. */
static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
{
    assert(numBits <= 32);
    return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
}

/** ZSTD_ldm_getChecksum() :
 *  numBitsToDiscard should be <= 32
 *  @return : the next most significant 32 bits after numBitsToDiscard */
static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
{
    assert(numBitsToDiscard <= 32);
    return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
}

/** ZSTD_ldm_getTag() :
 *  Given the hash, returns the most significant numTagBits bits
 *  after (32 + hbits) bits.
 *
 *  If there are not enough bits remaining, return the last
 *  numTagBits bits. */
static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
{
    assert(numTagBits < 32 && hbits <= 32);
    if (32 - hbits < numTagBits) {
        return hash & (((U32)1 << numTagBits) - 1);
    } else {
        return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
    }
}

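/* Bit layout of the 64-bit rolling hash consumed by the three helpers above
 * (illustrative, for hbits == 17 and numTagBits == 7, i.e. hashLog == 20 and
 * bucketSizeLog == 3):
 *   bits 63..47 : small hash  (ZSTD_ldm_getSmallHash, top hbits)
 *   bits 46..15 : checksum    (ZSTD_ldm_getChecksum, next 32 bits)
 *   bits 14..8  : tag         (ZSTD_ldm_getTag, next numTagBits bits)
 *   bits  7..0  : unused
 */
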
/** ZSTD_ldm_getBucket() :
 *  Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
{
    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
}

/** ZSTD_ldm_insertEntry() :
 *  Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
                                 size_t const hash, const ldmEntry_t entry,
                                 ldmParams_t const ldmParams)
{
    BYTE* const bucketOffsets = ldmState->bucketOffsets;
    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
    bucketOffsets[hash]++;
    bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
}

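/* Each bucket behaves as a small ring buffer (illustrative): with
 * bucketSizeLog == 3, bucketOffsets[hash] cycles through 0,1,...,7,0,..., so
 * the 9th insertion into a bucket silently overwrites its oldest entry. */
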
/** ZSTD_ldm_makeEntryAndInsertByTag() :
 *
 *  Gets the small hash, checksum, and tag from the rollingHash.
 *
 *  If the tag matches (1 << ldmParams.hashRateLog)-1, then
 *  creates an ldmEntry from the offset, and inserts it into the hash table.
 *
 *  hBits is the length of the small hash, which is the most significant hBits
 *  of rollingHash. The checksum is the next 32 most significant bits, followed
 *  by ldmParams.hashRateLog bits that make up the tag. */
static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
                                             U64 const rollingHash,
                                             U32 const hBits,
                                             U32 const offset,
                                             ldmParams_t const ldmParams)
{
    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);
    U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;
    if (tag == tagMask) {
        U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
        U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
        ldmEntry_t entry;
        entry.offset = offset;
        entry.checksum = checksum;
        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
    }
}

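/* Subsampling note (illustrative): assuming the rolling hash bits are
 * uniformly distributed, tag == tagMask holds for 1 out of every
 * (1 << hashRateLog) positions, so with hashRateLog == 7 roughly 1 byte in
 * 128 is inserted into the LDM hash table. */
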
/** ZSTD_ldm_countBackwardsMatch() :
 *  Returns the number of bytes that match backwards before pIn and pMatch.
 *
 *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
            const BYTE* pIn, const BYTE* pAnchor,
            const BYTE* pMatch, const BYTE* pMatchBase)
{
    size_t matchLength = 0;
    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
        pIn--;
        pMatch--;
        matchLength++;
    }
    return matchLength;
}

/** ZSTD_ldm_countBackwardsMatch_2segments() :
 *  Returns the number of bytes that match backwards from pMatch,
 *  even with the backwards match spanning 2 different segments.
 *
 *  On reaching `pMatchBase`, start counting from mEnd */
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
                    const BYTE* pIn, const BYTE* pAnchor,
                    const BYTE* pMatch, const BYTE* pMatchBase,
                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
{
    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
        /* If backwards match is entirely in the extDict or prefix, immediately return */
        return matchLength;
    }
    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
    return matchLength;
}

/** ZSTD_ldm_fillFastTables() :
 *
 *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
 *  This is similar to ZSTD_loadDictionaryContent.
 *
 *  The tables for the other strategies are filled within their
 *  block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
                                      void const* end)
{
    const BYTE* const iend = (const BYTE*)end;

    switch(ms->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
        break;

    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        break;
    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    return 0;
}

/** ZSTD_ldm_fillLdmHashTable() :
 *
 *  Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
 *  lastHash is the rolling hash that corresponds to lastHashed.
 *
 *  Returns the rolling hash corresponding to position iend-1. */
static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
                                     U64 lastHash, const BYTE* lastHashed,
                                     const BYTE* iend, const BYTE* base,
                                     U32 hBits, ldmParams_t const ldmParams)
{
    U64 rollingHash = lastHash;
    const BYTE* cur = lastHashed + 1;

    while (cur < iend) {
        rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],
                                              cur[ldmParams.minMatchLength-1],
                                              state->hashPower);
        ZSTD_ldm_makeEntryAndInsertByTag(state,
                                         rollingHash, hBits,
                                         (U32)(cur - base), ldmParams);
        ++cur;
    }
    return rollingHash;
}

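/* Note on the rotation above (informational): ZSTD_rollingHash_rotate()
 * slides the minMatchLength-byte hash window forward by one byte, removing
 * the contribution of the byte that leaves the window (cur[-1]) and adding
 * the byte that enters it (cur[minMatchLength-1]), in O(1) per position. */
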
void ZSTD_ldm_fillHashTable(
            ldmState_t* state, const BYTE* ip,
            const BYTE* iend, ldmParams_t const* params)
{
    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
    if ((size_t)(iend - ip) >= params->minMatchLength) {
        U64 startingHash = ZSTD_rollingHash_compute(ip, params->minMatchLength);
        ZSTD_ldm_fillLdmHashTable(
            state, startingHash, ip, iend - params->minMatchLength, state->window.base,
            params->hashLog - params->bucketSizeLog,
            *params);
    }
}

/** ZSTD_ldm_limitTableUpdate() :
 *
 *  Sets cctx->nextToUpdate to a position closer to anchor
 *  if it is far away
 *  (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
{
    U32 const curr = (U32)(anchor - ms->window.base);
    if (curr > ms->nextToUpdate + 1024) {
        ms->nextToUpdate =
            curr - MIN(512, curr - ms->nextToUpdate - 1024);
    }
}

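/** ZSTD_ldm_generateSequences_internal() :
 *  Generates the LDM sequences for one chunk of input, appending them to
 *  rawSeqStore.
 *  @return : the number of trailing bytes not covered by any sequence
 *            (leftover literals), or an error code if the sequence store
 *            is full. */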
static size_t ZSTD_ldm_generateSequences_internal(
        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    /* LDM parameters */
    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
    U32 const minMatchLength = params->minMatchLength;
    U64 const hashPower = ldmState->hashPower;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    U32 const ldmBucketSize = 1U << params->bucketSizeLog;
    U32 const hashRateLog = params->hashRateLog;
    U32 const ldmTagMask = (1U << params->hashRateLog) - 1;
    /* Prefix and extDict parameters */
    U32 const dictLimit = ldmState->window.dictLimit;
    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
    BYTE const* const lowPrefixPtr = base + dictLimit;
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE);
    /* Input positions */
    BYTE const* anchor = istart;
    BYTE const* ip = istart;
    /* Rolling hash */
    BYTE const* lastHashed = NULL;
    U64 rollingHash = 0;

    while (ip <= ilimit) {
        size_t mLength;
        U32 const curr = (U32)(ip - base);
        size_t forwardMatchLength = 0, backwardMatchLength = 0;
        ldmEntry_t* bestEntry = NULL;
        if (ip != istart) {
            rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0],
                                                  lastHashed[minMatchLength],
                                                  hashPower);
        } else {
            rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength);
        }
        lastHashed = ip;
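
        /* Tag gate (informational note): lookups apply the same
         * tag == ldmTagMask test as insertions, so every position that is
         * searched could also have been inserted; on average only 1 in
         * (1 << hashRateLog) positions passes. */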
        /* Do not insert and do not look for a match */
        if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) {
            ip++;
            continue;
        }

        /* Get the best entry and compute the match lengths */
        {
            ldmEntry_t* const bucket =
                ZSTD_ldm_getBucket(ldmState,
                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
                                   *params);
            ldmEntry_t* cur;
            size_t bestMatchLength = 0;
            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                if (extDict) {
                    BYTE const* const curMatchBase =
                        cur->offset < dictLimit ? dictBase : base;
                    BYTE const* const pMatch = curMatchBase + cur->offset;
                    BYTE const* const matchEnd =
                        cur->offset < dictLimit ? dictEnd : iend;
                    BYTE const* const lowMatchPtr =
                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;

                    curForwardMatchLength = ZSTD_count_2segments(
                                                ip, pMatch, iend,
                                                matchEnd, lowPrefixPtr);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch_2segments(ip, anchor,
                                                               pMatch, lowMatchPtr,
                                                               dictStart, dictEnd);
                    curTotalMatchLength = curForwardMatchLength +
                                          curBackwardMatchLength;
                } else { /* !extDict */
                    BYTE const* const pMatch = base + cur->offset;
                    curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
                                                     lowPrefixPtr);
                    curTotalMatchLength = curForwardMatchLength +
                                          curBackwardMatchLength;
                }

                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }
        }

        /* No match found -- continue searching */
        if (bestEntry == NULL) {
            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
                                             hBits, curr,
                                             *params);
            ip++;
            continue;
        }

        /* Match found */
        mLength = forwardMatchLength + backwardMatchLength;
        ip -= backwardMatchLength;

        {
            /* Store the sequence:
             * ip = curr - backwardMatchLength
             * The match is at (bestEntry->offset - backwardMatchLength)
             */
            U32 const matchIndex = bestEntry->offset;
            U32 const offset = curr - matchIndex;
            rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;

            /* Out of sequence storage */
            if (rawSeqStore->size == rawSeqStore->capacity)
                return ERROR(dstSize_tooSmall);
            seq->litLength = (U32)(ip - anchor);
            seq->matchLength = (U32)mLength;
            seq->offset = offset;
            rawSeqStore->size++;
        }

        /* Insert the current entry into the hash table */
        ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
                                         (U32)(lastHashed - base),
                                         *params);

        assert(ip + backwardMatchLength == lastHashed);

        /* Fill the hash table from lastHashed+1 to ip+mLength */
        /* Heuristic: don't need to fill the entire table at end of block */
        if (ip + mLength <= ilimit) {
            rollingHash = ZSTD_ldm_fillLdmHashTable(
                              ldmState, rollingHash, lastHashed,
                              ip + mLength, base, hBits, *params);
            lastHashed = ip + mLength - 1;
        }
        ip += mLength;
        anchor = ip;
    }
    return iend - anchor;
}

/*! ZSTD_ldm_reduceTable() :
 *  reduce table indexes by `reducerValue` */
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
                                 U32 const reducerValue)
{
    U32 u;
    for (u = 0; u < size; u++) {
        if (table[u].offset < reducerValue) table[u].offset = 0;
        else table[u].offset -= reducerValue;
    }
}

size_t ZSTD_ldm_generateSequences(
        ldmState_t* ldmState, rawSeqStore_t* sequences,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    U32 const maxDist = 1U << params->windowLog;
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    size_t const kMaxChunkSize = 1 << 20;
    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
    size_t chunk;
    size_t leftoverSize = 0;

    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
    /* Check that ZSTD_window_update() has been called for this chunk prior
     * to passing it to this function.
     */
    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
    /* The input could be very large (in zstdmt), so it must be broken up into
     * chunks to enforce the maximum distance and handle overflow correction.
     */
    assert(sequences->pos <= sequences->size);
    assert(sequences->size <= sequences->capacity);
    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
        size_t const remaining = (size_t)(iend - chunkStart);
        BYTE const *const chunkEnd =
            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
        size_t const chunkSize = chunkEnd - chunkStart;
        size_t newLeftoverSize;
        size_t const prevSize = sequences->size;

        assert(chunkStart < iend);
        /* 1. Perform overflow correction if necessary. */
        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
            U32 const ldmHSize = 1U << params->hashLog;
            U32 const correction = ZSTD_window_correctOverflow(
                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
            /* invalidate dictionaries on overflow correction */
            ldmState->loadedDictEnd = 0;
        }
        /* 2. We enforce the maximum offset allowed.
         *
         * kMaxChunkSize should be small enough that we don't lose too much of
         * the window through early invalidation.
         * TODO: * Test the chunk size.
         *       * Try invalidation after the sequence generation and test the
         *         offset against maxDist directly.
         *
         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
         * that any offset used is valid at the END of the sequence, since it may
         * be split into two sequences. This condition holds when using
         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
         * against maxDist directly, we'll have to carefully handle that case.
         */
        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
            ldmState, sequences, params, chunkStart, chunkSize);
        if (ZSTD_isError(newLeftoverSize))
            return newLeftoverSize;
        /* 4. We add the leftover literals from previous iterations to the first
         * newly generated sequence, or add the `newLeftoverSize` if none are
         * generated.
         */
        /* Prepend the leftover literals from the last call */
        if (prevSize < sequences->size) {
            sequences->seq[prevSize].litLength += (U32)leftoverSize;
            leftoverSize = newLeftoverSize;
        } else {
            assert(newLeftoverSize == chunkSize);
            leftoverSize += chunkSize;
        }
    }
    return 0;
}

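/* Chunking example (illustrative): with kMaxChunkSize == 1 MB, a 2.5 MB
 * input is processed as chunks of 1 MB, 1 MB and 0.5 MB. Leftover literals
 * from one chunk are folded into the litLength of the first sequence of the
 * next chunk, so no bytes are dropped across chunk boundaries. */
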
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
        if (srcSize <= seq->litLength) {
            /* Skip past srcSize literals */
            seq->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= seq->litLength;
        seq->litLength = 0;
        if (srcSize < seq->matchLength) {
            /* Skip past the first srcSize of the match */
            seq->matchLength -= (U32)srcSize;
            if (seq->matchLength < minMatch) {
                /* The match is too short, omit it */
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
                    seq[1].litLength += seq[0].matchLength;
                }
                rawSeqStore->pos++;
            }
            return;
        }
        srcSize -= seq->matchLength;
        seq->matchLength = 0;
        rawSeqStore->pos++;
    }
}

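/* Skipping example (illustrative): skipping 100 bytes over a sequence with
 * litLength == 60 and matchLength == 70 consumes all 60 literals plus the
 * first 40 bytes of the match, leaving litLength == 0 and matchLength == 30;
 * if 30 < minMatch, the 30-byte remainder is folded into the next sequence's
 * literals and the sequence is dropped. */
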
/**
 * If the sequence length is longer than remaining then the sequence is split
 * between this block and the next.
 *
 * Returns the current sequence to handle, or if the rest of the block should
 * be literals, it returns a sequence with offset == 0.
 */
static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
                                 U32 const remaining, U32 const minMatch)
{
    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
    assert(sequence.offset > 0);
    /* Likely: No partial sequence */
    if (remaining >= sequence.litLength + sequence.matchLength) {
        rawSeqStore->pos++;
        return sequence;
    }
    /* Cut the sequence short (offset == 0 ==> rest is literals). */
    if (remaining <= sequence.litLength) {
        sequence.offset = 0;
    } else if (remaining < sequence.litLength + sequence.matchLength) {
        sequence.matchLength = remaining - sequence.litLength;
        if (sequence.matchLength < minMatch) {
            sequence.offset = 0;
        }
    }
    /* Skip past `remaining` bytes for the future sequences. */
    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
    return sequence;
}

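/* Splitting example (illustrative): with remaining == 100 and a stored
 * sequence {litLength == 40, matchLength == 90}, the returned sequence is
 * truncated to matchLength == 60 (emitted as a match only if 60 >= minMatch;
 * otherwise offset is zeroed and the rest of the block becomes literals).
 * The other 30 match bytes stay in the store for the next block, subject to
 * the same minMatch check inside ZSTD_ldm_skipSequences(). */
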
void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}

size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
    void const* src, size_t srcSize)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    unsigned const minMatch = cParams->minMatch;
    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    /* Input positions */
    BYTE const* ip = istart;

    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
    if (cParams->strategy >= ZSTD_btopt) {
        size_t lastLLSize;
        ms->ldmSeqStore = rawSeqStore;
        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
        return lastLLSize;
    }

    assert(rawSeqStore->pos <= rawSeqStore->size);
    assert(rawSeqStore->size <= rawSeqStore->capacity);

    /* Loop through each sequence and apply the block compressor to the lits */
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
        /* maybeSplitSequence updates rawSeqStore->pos */
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
                                                   (U32)(iend - ip), minMatch);
        int i;
        /* End signal */
        if (sequence.offset == 0)
            break;

        assert(ip + sequence.litLength + sequence.matchLength <= iend);

        /* Fill tables for block compressor */
        ZSTD_ldm_limitTableUpdate(ms, ip);
        ZSTD_ldm_fillFastTables(ms, ip);
        /* Run the block compressor */
        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
        {
            size_t const newLitLength =
                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
            ip += sequence.litLength;
            /* Update the repcodes */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                rep[i] = rep[i-1];
            rep[0] = sequence.offset;
            /* Store the sequence */
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
                          sequence.offset + ZSTD_REP_MOVE,
                          sequence.matchLength - MINMATCH);
            ip += sequence.matchLength;
        }
    }
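
    /* Note on the stored offset above (informational): an LDM match always
     * carries an explicit offset, so the repcode history is shifted by hand
     * and the offset is encoded as sequence.offset + ZSTD_REP_MOVE, which
     * marks it as a non-repcode offset in zstd's sequence encoding. */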
    /* Fill the tables for the block compressor */
    ZSTD_ldm_limitTableUpdate(ms, ip);
    ZSTD_ldm_fillFastTables(ms, ip);

    /* Compress the last literals */
    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}