zstd_fast.c

/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"

void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
        hashTable[hash0] = curr;
        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hash] == 0) {  /* not yet filled */
                    hashTable[hash] = curr + p;
    }   }   }   }
}
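
/* Illustrative sketch (not part of the library): with fastHashFillStep == 3,
 * the loop above unconditionally records positions 0, 3, 6, ... and, in
 * ZSTD_dtlm_full mode, additionally records the in-between positions, but
 * only when their hash slot is still empty:
 *
 *     pos:        0  1  2  3  4  5  6 ...
 *     dtlm_fast:  X        X        X      (always overwrite the slot)
 *     dtlm_full:  X  o  o  X  o  o  X      (o = write only if slot == 0)
 */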

/**
 * If you squint hard enough (and ignore repcodes), the search operation at any
 * given position is broken into 4 stages:
 *
 * 1. Hash   (map position to hash value via input read)
 * 2. Lookup (map hash val to index via hashtable read)
 * 3. Load   (map index to value at that position via input read)
 * 4. Compare
 *
 * Each of these steps involves a memory read at an address which is computed
 * from the previous step. This means these steps must be sequenced and their
 * latencies are cumulative.
 *
 * Rather than do 1->2->3->4 sequentially for a single position before moving
 * onto the next, this implementation interleaves these operations across the
 * next few positions:
 *
 * R = Repcode Read & Compare
 * H = Hash
 * T = Table Lookup
 * M = Match Read & Compare
 *
 * Pos | Time -->
 * ----+-------------------
 * N   | ... M
 * N+1 | ...   TM
 * N+2 |    R H   T M
 * N+3 |         H    TM
 * N+4 |           R H   T M
 * N+5 |                H  ...
 * N+6 |                  R ...
 *
 * This is very much analogous to the pipelining of execution in a CPU. And just
 * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
 * branch).
 *
 * When this happens, we throw away our current state, and do the following prep
 * to re-enter the loop:
 *
 * Pos | Time -->
 * ----+-------------------
 * N   | H T
 * N+1 |   H
 *
 * This is also the work we do at the beginning to enter the loop initially.
 */
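
/* Illustrative sketch (not part of the library): for a single position `ip`,
 * the four stages above correspond to the scalar operations used in the loop
 * body below:
 *
 *     size_t const h   = ZSTD_hashPtr(ip, hlog, mls);    // 1. Hash
 *     U32 const idx    = hashTable[h];                   // 2. Lookup
 *     U32 const mval   = MEM_read32(base + idx);         // 3. Load
 *     if (MEM_read32(ip) == mval) { ... }                // 4. Compare
 *
 * Each read depends on the result of the previous one, so their latencies add
 * up; the loop below hides that latency by issuing these stages for several
 * neighboring positions at once.
 */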

FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_noDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;

    const BYTE* anchor = istart;
    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;

    U32 rep_offset1 = rep[0];
    U32 rep_offset2 = rep[1];
    U32 offsetSaved = 0;

    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 idx;      /* match idx for ip0 */
    U32 mval;     /* src value at match idx */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;

    /* ip0 and ip1 are always adjacent. The targetLength skipping and
     * uncompressibility acceleration is applied to every other position,
     * matching the behavior of #1562. step therefore represents the gap
     * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));

    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
    ip0 += (ip0 == prefixStart);
    {   U32 const curr = (U32)(ip0 - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
        U32 const maxRep = curr - windowLow;
        if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0;
        if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    idx = hashTable[hash0];

    do {
        /* load repcode match for ip[2] */
        const U32 rval = MEM_read32(ip2 - rep_offset1);

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        /* check repcode at ip[2] */
        if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
            ip0 = ip2;
            match0 = ip0 - rep_offset1;
            mLength = ip0[-1] == match0[-1];
            ip0 -= mLength;
            match0 -= mLength;
            offcode = STORE_REPCODE_1;
            mLength += 4;
            goto _match;
        }

        /* load match for ip[0] */
        if (idx >= prefixStartIndex) {
            mval = MEM_read32(base + idx);
        } else {
            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
        }

        /* check match at ip[0] */
        if (MEM_read32(ip0) == mval) {
            /* found a match! */
            goto _offset;
        }

        /* lookup ip[1] */
        idx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        /* load match for ip[0] */
        if (idx >= prefixStartIndex) {
            mval = MEM_read32(base + idx);
        } else {
            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
        }

        /* check match at ip[0] */
        if (MEM_read32(ip0) == mval) {
            /* found a match! */
            goto _offset;
        }

        /* lookup ip[1] */
        idx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions we could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* save reps for next block */
    rep[0] = rep_offset1 ? rep_offset1 : offsetSaved;
    rep[1] = rep_offset2 ? rep_offset2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, idx */

    /* Compute the offset code. */
    match0 = base + idx;
    rep_offset2 = rep_offset1;
    rep_offset1 = (U32)(ip0 - match0);
    offcode = STORE_OFFSET(rep_offset1);
    mLength = 4;

    /* Count the backwards match length. */
    while (((ip0 > anchor) & (match0 > prefixStart)) && (ip0[-1] == match0[-1])) {
        ip0--;
        match0--;
        mLength++;
    }

_match: /* Requires: ip0, match0, offcode */

    /* Count the forward length. */
    mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* write next hash table entry */
    if (ip1 < ip0) {
        hashTable[hash1] = (U32)(ip1 - base);
    }

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

        if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
            while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
                { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += rLength;
                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength);
                anchor = ip0;
                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
    }   }   }

    goto _start;
}

#define ZSTD_GEN_FAST_FN(dictMode, mls, step)                                                            \
    static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step(                                   \
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
            void const* src, size_t srcSize)                                                             \
    {                                                                                                    \
        return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
    }
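
/* Illustrative sketch (not part of the library): ZSTD_GEN_FAST_FN(noDict, 4, 1)
 * expands to a thin wrapper that pins the template parameters, letting the
 * compiler fully specialize the inlined generic body:
 *
 *     static size_t ZSTD_compressBlock_fast_noDict_4_1(
 *             ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
 *             void const* src, size_t srcSize)
 *     {
 *         return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 1);
 *     }
 */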

ZSTD_GEN_FAST_FN(noDict, 4, 1)
ZSTD_GEN_FAST_FN(noDict, 5, 1)
ZSTD_GEN_FAST_FN(noDict, 6, 1)
ZSTD_GEN_FAST_FN(noDict, 7, 1)

ZSTD_GEN_FAST_FN(noDict, 4, 0)
ZSTD_GEN_FAST_FN(noDict, 5, 0)
ZSTD_GEN_FAST_FN(noDict, 6, 0)
ZSTD_GEN_FAST_FN(noDict, 7, 0)

size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    if (ms->cParams.targetLength > 1) {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
        }
    } else {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
        }
    }
}
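
/* Illustrative note (not part of the library): the hasStep template argument
 * mirrors the dispatch above. With targetLength <= 1 the _0 variants run with
 * a fixed stepSize of 2; with, say, targetLength == 3 the _1 variants run with
 * stepSize == 3 + !(3) + 1 == 4, skipping more aggressively between pairs of
 * positions.
 */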

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   prefixStartIndex = ms->window.dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashTable = dms->hashTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
    const U32 dictHLog             = dictCParams->hashLog;

    /* if a dictionary is still attached, it necessarily means that
     * it is within window size. So we just check it. */
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
    assert(endIndex - prefixStartIndex <= maxDistance);
    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    /* ensure there will be no underflow
     * when translating a dict index into a local index */
    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
    ip += (dictAndPrefixLength == 0);
    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
        U32 const curr = (U32)(ip-base);
        U32 const matchIndex = hashTable[h];
        const BYTE* match = base + matchIndex;
        const U32 repIndex = curr + 1 - offset_1;
        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
        hashTable[h] = curr;   /* update hash table */

        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
        } else if ( (matchIndex <= prefixStartIndex) ) {
            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
            U32 const dictMatchIndex = dictHashTable[dictHash];
            const BYTE* dictMatch = dictBase + dictMatchIndex;
            if (dictMatchIndex <= dictStartIndex ||
                MEM_read32(dictMatch) != MEM_read32(ip)) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            } else {
                /* found a dict match */
                U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
                while (((ip>anchor) & (dictMatch>dictStart))
                     && (ip[-1] == dictMatch[-1])) {
                    ip--; dictMatch--; mLength++;
                } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
            }
        } else if (MEM_read32(match) != MEM_read32(ip)) {
            /* it's not a match, and we're not going to check the dictionary */
            assert(stepSize >= 1);
            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
            continue;
        } else {
            /* found a regular match */
            U32 const offset = (U32)(ip-match);
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            while (((ip>anchor) & (match>prefixStart))
                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
            offset_2 = offset_1;
            offset_1 = offset;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
        }

        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            assert(base+curr+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                        dictBase - dictIndexDelta + repIndex2 :
                        base + repIndex2;
                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
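
/* Illustrative sketch (not part of the library): dictIndexDelta translates
 * between the dictionary's index space and the current window's index space.
 * Assuming, hypothetically, a 1000-byte dictionary (dictEnd - dictBase == 1000)
 * attached at prefixStartIndex == 5000:
 *
 *     dictIndexDelta = 5000 - 1000 = 4000
 *
 * so a dictionary match at dictMatchIndex == 700 corresponds to local index
 * 700 + 4000 == 4700, and its distance from the current position is
 * curr - dictMatchIndex - dictIndexDelta, exactly as computed above.
 */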

ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)

size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState != NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
    }
}

static size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);

    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
        const U32    matchIndex = hashTable[h];
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE*  match = matchBase + matchIndex;
        const U32    curr = (U32)(ip-base);
        const U32    repIndex = curr + 1 - offset_1;
        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        hashTable[h] = curr;   /* update hash table */
        DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);

        if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
             & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength);
            ip += rLength;
            anchor = ip;
        } else {
            if ( (matchIndex < dictStartIndex) ||
                 (MEM_read32(match) != MEM_read32(ip)) ) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            }
            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 const offset = curr - matchIndex;
                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                offset_2 = offset_1; offset_1 = offset;  /* update offset history */
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
                ip += mLength;
                anchor = ip;
        }   }

        if (ip <= ilimit) {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex))  /* intentional overflow */
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
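
/* Illustrative sketch (not part of the library): in extDict mode the window
 * spans two buffers, and an index selects which one it resolves into:
 *
 *     const BYTE* const b = (matchIndex < prefixStartIndex) ? dictBase : base;
 *     const BYTE* const matchPtr = b + matchIndex;
 *
 * e.g., assuming hypothetically prefixStartIndex == 4096, index 4000 resolves
 * into dictBase (the old, evicted segment) while index 5000 resolves into
 * base (the current prefix). A match may start in one segment and continue
 * into the other, which is why lengths are counted with ZSTD_count_2segments
 * above.
 */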

ZSTD_GEN_FAST_FN(extDict, 4, 0)
ZSTD_GEN_FAST_FN(extDict, 5, 0)
ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)

size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
    }
}