/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"

void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashLarge = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash tables.
     * Insert the other positions into the large hash table if their entry
     * is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
            if (i == 0)
                hashSmall[smHash] = curr + i;
            if (i == 0 || hashLarge[lgHash] == 0)
                hashLarge[lgHash] = curr + i;
            /* Only load extra positions for ZSTD_dtlm_full */
            if (dtlm == ZSTD_dtlm_fast)
                break;
        }
    }
}
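
/* Double-fast search (no dictionary):
 * two hash tables are maintained per position: hashLong indexes 8-byte
 * sequences and hashSmall indexes mls-byte sequences. At each position the
 * search order is: repcode at ip+1, then a long match at ip, then a short
 * match at ip (kept only if no long match exists at the next position).
 * When no match is found for kStepIncr consecutive positions, the step size
 * between probed positions grows, trading ratio for speed on hard-to-compress
 * data. */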
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    /* presumes that, if there is a dictionary, it must be using Attach mode */
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1 = rep[0], offset_2 = rep[1];
    U32 offsetSaved = 0;

    size_t mLength;
    U32 offset;
    U32 curr;

    /* how many positions to search before increasing step size */
    const size_t kStepIncr = 1 << kSearchStrength;
    /* the position at which to increment the step size if no match is found */
    const BYTE* nextStep;
    size_t step;   /* the current step size */

    size_t hl0;    /* the long hash at ip */
    size_t hl1;    /* the long hash at ip1 */

    U32 idxl0;     /* the long match index for ip */
    U32 idxl1;     /* the long match index for ip1 */

    const BYTE* matchl0;   /* the long match for ip */
    const BYTE* matchs0;   /* the short match for ip */
    const BYTE* matchl1;   /* the long match for ip1 */

    const BYTE* ip = istart;   /* the current position */
    const BYTE* ip1;           /* the next position */

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");

    /* init */
    ip += ((ip - prefixLowest) == 0);
    {
        U32 const current = (U32)(ip - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
        U32 const maxRep = current - windowLow;
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }
    /* Outer Loop: one iteration per match found and stored */
    while (1) {
        step = 1;
        nextStep = ip + kStepIncr;
        ip1 = ip + step;

        if (ip1 > ilimit) {
            goto _cleanup;
        }

        hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
        idxl0 = hashLong[hl0];
        matchl0 = base + idxl0;

        /* Inner Loop: one iteration per search / position */
        do {
            const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
            const U32 idxs0 = hashSmall[hs0];
            curr = (U32)(ip-base);
            matchs0 = base + idxs0;

            hashLong[hl0] = hashSmall[hs0] = curr;   /* update hash tables */

            /* check noDict repcode */
            if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
                mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
                ip++;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
                goto _match_stored;
            }

            hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);

            if (idxl0 > prefixLowestIndex) {
                /* check prefix long match */
                if (MEM_read64(matchl0) == MEM_read64(ip)) {
                    mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
                    offset = (U32)(ip-matchl0);
                    while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; }   /* catch up */
                    goto _match_found;
                }
            }

            idxl1 = hashLong[hl1];
            matchl1 = base + idxl1;

            if (idxs0 > prefixLowestIndex) {
                /* check prefix short match */
                if (MEM_read32(matchs0) == MEM_read32(ip)) {
                    goto _search_next_long;
                }
            }

            if (ip1 >= nextStep) {
                PREFETCH_L1(ip1 + 64);
                PREFETCH_L1(ip1 + 128);
                step++;
                nextStep += kStepIncr;
            }
            ip = ip1;
            ip1 += step;

            hl0 = hl1;
            idxl0 = idxl1;
            matchl0 = matchl1;
#if defined(__aarch64__)
            PREFETCH_L1(ip+256);
#endif
        } while (ip1 <= ilimit);

_cleanup:
        /* save reps for next block */
        rep[0] = offset_1 ? offset_1 : offsetSaved;
        rep[1] = offset_2 ? offset_2 : offsetSaved;

        /* Return the last literals size */
        return (size_t)(iend - anchor);
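
        /* A short (mls-byte) match was found at ip. Before storing it, check
         * whether a longer 8-byte match exists at ip1 (the next search
         * position); if so, prefer it, otherwise fall back to the short match
         * found at ip. */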
_search_next_long:

        /* check prefix long +1 match */
        if (idxl1 > prefixLowestIndex) {
            if (MEM_read64(matchl1) == MEM_read64(ip1)) {
                ip = ip1;
                mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
                offset = (U32)(ip-matchl1);
                while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; }   /* catch up */
                goto _match_found;
            }
        }

        /* if no long +1 match, explore the short match we found */
        mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
        offset = (U32)(ip - matchs0);
        while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; }   /* catch up */

        /* fall-through */

_match_found:   /* requires ip, offset, mLength */
        offset_2 = offset_1;
        offset_1 = offset;

        if (step < 4) {
            /* It is unsafe to write this value back to the hashtable when ip1 is
             * greater than or equal to the new ip we will have after we're done
             * processing this match. Rather than perform that test directly
             * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler,
             * more predictable test. The minmatch, even if we take a short match, is
             * 4 bytes, so as long as step (the initial distance between ip and ip1)
             * is less than 4, we know ip1 < new ip. */
            hashLong[hl1] = (U32)(ip1 - base);
        }

        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);

_match_stored:
        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && ( (offset_2>0)
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;   /* swap offset_2 <=> offset_1 */
                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength);
                ip += rLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
        }
    }
}
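
/* Double-fast search with an attached dictionary (dictMatchState):
 * same two-table scheme as the noDict variant, but when a hash-table index
 * falls below prefixLowestIndex the search falls back to the dictionary's
 * own hash tables, and match lengths may extend across the dictionary /
 * prefix boundary via ZSTD_count_2segments(). */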
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    /* presumes that, if there is a dictionary, it must be using Attach mode */
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1 = rep[0], offset_2 = rep[1];
    U32 offsetSaved = 0;

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashLong  = dms->hashTable;
    const U32* const dictHashSmall = dms->chainTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    const U32 dictIndexDelta       = prefixLowestIndex - (U32)(dictEnd - dictBase);
    const U32 dictHBitsL           = dictCParams->hashLog;
    const U32 dictHBitsS           = dictCParams->chainLog;
    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");

    /* if a dictionary is attached, it must be within window range */
    assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);

    /* init */
    ip += (dictAndPrefixLength == 0);

    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);
    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        U32 offset;
        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
        size_t const h  = ZSTD_hashPtr(ip, hBitsS, mls);
        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
        U32 const curr = (U32)(ip-base);
        U32 const matchIndexL = hashLong[h2];
        U32 matchIndexS = hashSmall[h];
        const BYTE* matchLong = base + matchIndexL;
        const BYTE* match = base + matchIndexS;
        const U32 repIndex = curr + 1 - offset_1;
        const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */

        /* check repcode */
        if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
            goto _match_stored;
        }

        if (matchIndexL > prefixLowestIndex) {
            /* check prefix long match */
            if (MEM_read64(matchLong) == MEM_read64(ip)) {
                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
                offset = (U32)(ip-matchLong);
                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                goto _match_found;
            }
        } else {
            /* check dictMatchState long match */
            U32 const dictMatchIndexL = dictHashLong[dictHL];
            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
            assert(dictMatchL < dictEnd);

            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; }   /* catch up */
                goto _match_found;
            }
        }

        if (matchIndexS > prefixLowestIndex) {
            /* check prefix short match */
            if (MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
            }
        } else {
            /* check dictMatchState short match */
            U32 const dictMatchIndexS = dictHashSmall[dictHS];
            match = dictBase + dictMatchIndexS;
            matchIndexS = dictMatchIndexS + dictIndexDelta;

            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
            }
        }

        ip += ((ip-anchor) >> kSearchStrength) + 1;
#if defined(__aarch64__)
        PREFETCH_L1(ip+256);
#endif
        continue;
_search_next_long:
        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
            U32 const matchIndexL3 = hashLong[hl3];
            const BYTE* matchL3 = base + matchIndexL3;
            hashLong[hl3] = curr + 1;

            /* check prefix long +1 match */
            if (matchIndexL3 > prefixLowestIndex) {
                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                    ip++;
                    offset = (U32)(ip-matchL3);
                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; }   /* catch up */
                    goto _match_found;
                }
            } else {
                /* check dict long +1 match */
                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
                assert(dictMatchL3 < dictEnd);
                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
                    ip++;
                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; }   /* catch up */
                    goto _match_found;
                }
            }
        }

        /* if no long +1 match, explore the short match we found */
        if (matchIndexS < prefixLowestIndex) {
            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
            offset = (U32)(curr - matchIndexS);
            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
        } else {
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            offset = (U32)(ip - match);
            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
        }
_match_found:
        offset_2 = offset_1;
        offset_1 = offset;

        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);

_match_stored:
        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
                        dictBase + repIndex2 - dictIndexDelta :
                        base + repIndex2;
                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }   /* while (ip < ilimit) */

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
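
/* ZSTD_GEN_DFAST_FN instantiates one specialized block compressor per
 * (dictMode, mls) pair, so that mls is a compile-time constant inside the
 * corresponding _generic body. */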
#define ZSTD_GEN_DFAST_FN(dictMode, mls)                                                                 \
    static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls(                                      \
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
            void const* src, size_t srcSize)                                                             \
    {                                                                                                    \
        return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
    }

ZSTD_GEN_DFAST_FN(noDict, 4)
ZSTD_GEN_DFAST_FN(noDict, 5)
ZSTD_GEN_DFAST_FN(noDict, 6)
ZSTD_GEN_DFAST_FN(noDict, 7)

ZSTD_GEN_DFAST_FN(dictMatchState, 4)
ZSTD_GEN_DFAST_FN(dictMatchState, 5)
ZSTD_GEN_DFAST_FN(dictMatchState, 6)
ZSTD_GEN_DFAST_FN(dictMatchState, 7)

size_t ZSTD_compressBlock_doubleFast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
    }
}


size_t ZSTD_compressBlock_doubleFast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
    }
}
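
/* Double-fast search over an external, non-contiguous dictionary segment
 * (extDict): match candidates whose index falls below prefixStartIndex live
 * in dictBase[] rather than base[], so candidate pointers, match-length
 * counting (ZSTD_count_2segments) and "catch up" bounds must all select the
 * correct segment. */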
static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    U32 offset_1 = rep[0], offset_2 = rep[1];

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);

    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);
    /* Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because (ip+1) */
        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
        const U32 matchIndex = hashSmall[hSmall];
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;

        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
        const U32 matchLongIndex = hashLong[hLong];
        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
        const BYTE* matchLong = matchLongBase + matchLongIndex;

        const U32 curr = (U32)(ip-base);
        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        size_t mLength;
        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */

        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
            & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
        } else {
            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
                offset = curr - matchLongIndex;
                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);

            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndex3 = hashLong[h3];
                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
                const BYTE* match3 = match3Base + matchIndex3;
                U32 offset;
                hashLong[h3] = curr + 1;
                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
                    ip++;
                    offset = curr+1 - matchIndex3;
                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; }   /* catch up */
                } else {
                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                    offset = curr - matchIndex;
                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                }
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);

            } else {
                ip += ((ip-anchor) >> kSearchStrength) + 1;
                continue;
            }
        }
        /* move to next sequence start */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
                    & (offset_2 <= current2 - dictStartIndex))
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
ZSTD_GEN_DFAST_FN(extDict, 4)
ZSTD_GEN_DFAST_FN(extDict, 5)
ZSTD_GEN_DFAST_FN(extDict, 6)
ZSTD_GEN_DFAST_FN(extDict, 7)

size_t ZSTD_compressBlock_doubleFast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
    }
}