/* LzFindMt.c -- multithreaded Match finder for LZ algorithms
: Igor Pavlov : Public domain */

#include "Precomp.h"

// #include <stdio.h>

#include "CpuArch.h"

#include "LzHash.h"
#include "LzFindMt.h"

// #define LOG_ITERS

// #define LOG_THREAD
#ifdef LOG_THREAD
#include <stdio.h>
#define PRF(x) x
#else
#define PRF(x)
#endif

#ifdef LOG_ITERS
#include <stdio.h>
extern UInt64 g_NumIters_Tree;
extern UInt64 g_NumIters_Loop;
extern UInt64 g_NumIters_Bytes;
#define LOG_ITER(x) x
#else
#define LOG_ITER(x)
#endif

#define kMtHashBlockSize ((UInt32)1 << 17)
#define kMtHashNumBlocks (1 << 1)

#define GET_HASH_BLOCK_OFFSET(i)  (((i) & (kMtHashNumBlocks - 1)) * kMtHashBlockSize)

#define kMtBtBlockSize ((UInt32)1 << 16)
#define kMtBtNumBlocks (1 << 4)

#define GET_BT_BLOCK_OFFSET(i)  (((i) & (kMtBtNumBlocks - 1)) * (size_t)kMtBtBlockSize)
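
/*
  note: GET_*_BLOCK_OFFSET(i) maps a monotonically increasing block index to
  its slot in a small ring by masking with (numBlocks - 1), so the numBlocks
  constants above must stay powers of 2. The hash ring has 2 blocks, the
  bt ring has 16 blocks; a larger bt ring lets the BT thread run further
  ahead of the LZ consumer.
*/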
/*
  HASH functions:
  We use raw 8/16 bits from a[1] and a[2],
  xored with crc(a[0]) and crc(a[3]).
  We check a[0], a[3] only. We don't need to compare a[1] and a[2] in matches.
  our crc() function provides one-to-one correspondence for low 8-bit values:
    (crc[0...0xFF] & 0xFF) <-> [0...0xFF]
*/

#define MF(mt) ((mt)->MatchFinder)
#define MF_CRC (p->crc)
// #define MF(mt) (&(mt)->MatchFinder)
// #define MF_CRC (p->MatchFinder.crc)

#define MT_HASH2_CALC \
  h2 = (MF_CRC[cur[0]] ^ cur[1]) & (kHash2Size - 1);

#define MT_HASH3_CALC { \
  UInt32 temp = MF_CRC[cur[0]] ^ cur[1]; \
  h2 = temp & (kHash2Size - 1); \
  h3 = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }

/*
#define MT_HASH3_CALC__NO_2 { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  h3 = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }

#define MT_HASH4_CALC { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  h2 = temp & (kHash2Size - 1); \
  temp ^= ((UInt32)cur[2] << 8); \
  h3 = temp & (kHash3Size - 1); \
  h4 = (temp ^ (p->crc[cur[3]] << kLzHash_CrcShift_1)) & p->hash4Mask; }
  // (kHash4Size - 1);
*/

Z7_NO_INLINE
static void MtSync_Construct(CMtSync *p)
{
  p->affinityGroup = -1;
  p->affinityInGroup = 0;
  p->affinity = 0;
  p->wasCreated = False;
  p->csWasInitialized = False;
  p->csWasEntered = False;
  Thread_CONSTRUCT(&p->thread)
  Event_Construct(&p->canStart);
  Event_Construct(&p->wasStopped);
  Semaphore_Construct(&p->freeSemaphore);
  Semaphore_Construct(&p->filledSemaphore);
}

// #define DEBUG_BUFFER_LOCK // define it to debug lock state

#ifdef DEBUG_BUFFER_LOCK
#include <stdlib.h>
#define BUFFER_MUST_BE_LOCKED(p)    if (!(p)->csWasEntered) exit(1);
#define BUFFER_MUST_BE_UNLOCKED(p)  if ( (p)->csWasEntered) exit(1);
#else
#define BUFFER_MUST_BE_LOCKED(p)
#define BUFFER_MUST_BE_UNLOCKED(p)
#endif

#define LOCK_BUFFER(p) { \
  BUFFER_MUST_BE_UNLOCKED(p); \
  CriticalSection_Enter(&(p)->cs); \
  (p)->csWasEntered = True; }

#define UNLOCK_BUFFER(p) { \
  BUFFER_MUST_BE_LOCKED(p); \
  CriticalSection_Leave(&(p)->cs); \
  (p)->csWasEntered = False; }

Z7_NO_INLINE
static UInt32 MtSync_GetNextBlock(CMtSync *p)
{
  UInt32 numBlocks = 0;
  if (p->needStart)
  {
    BUFFER_MUST_BE_UNLOCKED(p)
    p->numProcessedBlocks = 1;
    p->needStart = False;
    p->stopWriting = False;
    p->exit = False;
    Event_Reset(&p->wasStopped);
    Event_Set(&p->canStart);
  }
  else
  {
    UNLOCK_BUFFER(p)
    // we free current block
    numBlocks = p->numProcessedBlocks++;
    Semaphore_Release1(&p->freeSemaphore);
  }

  // buffer is UNLOCKED here
  Semaphore_Wait(&p->filledSemaphore);
  LOCK_BUFFER(p)
  return numBlocks;
}
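
/*
  note: MtSync implements a classic bounded ring between a writer thread and
  a reader:
    writer: Semaphore_Wait(freeSemaphore);   fill block i;  Semaphore_Release1(filledSemaphore);
    reader: Semaphore_Wait(filledSemaphore); use  block i;  Semaphore_Release1(freeSemaphore);
  MtSync_GetNextBlock() combines "release previous block" and "acquire next
  block", and keeps the critical section held while the caller reads the block.
*/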
/* if Writing (Processing) thread was started, we must call MtSync_StopWriting() */

Z7_NO_INLINE
static void MtSync_StopWriting(CMtSync *p)
{
  if (!Thread_WasCreated(&p->thread) || p->needStart)
    return;

  PRF(printf("\nMtSync_StopWriting %p\n", p));

  if (p->csWasEntered)
  {
    /* we don't use buffer in this thread after StopWriting().
       So we UNLOCK buffer.
       And we restore default UNLOCKED state for stopped thread */
    UNLOCK_BUFFER(p)
  }

  /* We send (p->stopWriting) message and release freeSemaphore
     to free current block.
     So the thread will see (p->stopWriting) at some
     iteration after Wait(freeSemaphore).
     The thread doesn't need to fill all avail free blocks,
     so we can get fast thread stop.
  */

  p->stopWriting = True;
  Semaphore_Release1(&p->freeSemaphore); // check semaphore count !!!

  PRF(printf("\nMtSync_StopWriting %p : Event_Wait(&p->wasStopped)\n", p));
  Event_Wait(&p->wasStopped);
  PRF(printf("\nMtSync_StopWriting %p : Event_Wait() finished\n", p));

  /* 21.03 : we don't restore semaphore counters here.
     We will recreate and reinit semaphores in next start */

  p->needStart = True;
}

Z7_NO_INLINE
static void MtSync_Destruct(CMtSync *p)
{
  PRF(printf("\nMtSync_Destruct %p\n", p));

  if (Thread_WasCreated(&p->thread))
  {
    /* we want thread to be in Stopped state before sending EXIT command.
       note: stop(btSync) will stop (htSync) also */
    MtSync_StopWriting(p);
    /* thread in Stopped state here : (p->needStart == true) */
    p->exit = True;
    // if (p->needStart)  // it's (true)
    Event_Set(&p->canStart);       // we send EXIT command to thread
    Thread_Wait_Close(&p->thread); // we wait for thread to finish
  }

  if (p->csWasInitialized)
  {
    CriticalSection_Delete(&p->cs);
    p->csWasInitialized = False;
  }
  p->csWasEntered = False;

  Event_Close(&p->canStart);
  Event_Close(&p->wasStopped);
  Semaphore_Close(&p->freeSemaphore);
  Semaphore_Close(&p->filledSemaphore);

  p->wasCreated = False;
}

// #define RINOK_THREAD(x) { if ((x) != 0) return SZ_ERROR_THREAD; }
// we want to get real system error codes here instead of SZ_ERROR_THREAD
#define RINOK_THREAD(x) RINOK_WRes(x)

// call it before each new file (when new starting is required):
Z7_NO_INLINE
static SRes MtSync_Init(CMtSync *p, UInt32 numBlocks)
{
  WRes wres;
  // BUFFER_MUST_BE_UNLOCKED(p)
  if (!p->needStart || p->csWasEntered)
    return SZ_ERROR_FAIL;
  wres = Semaphore_OptCreateInit(&p->freeSemaphore, numBlocks, numBlocks);
  if (wres == 0)
    wres = Semaphore_OptCreateInit(&p->filledSemaphore, 0, numBlocks);
  return MY_SRes_HRESULT_FROM_WRes(wres);
}

static WRes MtSync_Create_WRes(CMtSync *p, THREAD_FUNC_TYPE startAddress, void *obj)
{
  WRes wres;

  if (p->wasCreated)
    return SZ_OK;

  RINOK_THREAD(CriticalSection_Init(&p->cs))
  p->csWasInitialized = True;
  p->csWasEntered = False;

  RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->canStart))
  RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->wasStopped))

  p->needStart = True;
  p->exit = True;  /* p->exit is unused before (canStart) Event.
                      But in case of some unexpected code failure we will get fast exit from thread */

  // return ERROR_TOO_MANY_POSTS; // for debug
  // return EINVAL; // for debug

#ifdef _WIN32
  if (p->affinityGroup >= 0)
    wres = Thread_Create_With_Group(&p->thread, startAddress, obj,
        (unsigned)(UInt32)p->affinityGroup, (CAffinityMask)p->affinityInGroup);
  else
#endif
  if (p->affinity != 0)
    wres = Thread_Create_With_Affinity(&p->thread, startAddress, obj, (CAffinityMask)p->affinity);
  else
    wres = Thread_Create(&p->thread, startAddress, obj);

  RINOK_THREAD(wres)
  p->wasCreated = True;
  return SZ_OK;
}

Z7_NO_INLINE
static SRes MtSync_Create(CMtSync *p, THREAD_FUNC_TYPE startAddress, void *obj)
{
  const WRes wres = MtSync_Create_WRes(p, startAddress, obj);
  if (wres == 0)
    return 0;
  MtSync_Destruct(p);
  return MY_SRes_HRESULT_FROM_WRes(wres);
}

// ---------- HASH THREAD ----------

#define kMtMaxValForNormalize 0xFFFFFFFF
// #define kMtMaxValForNormalize ((1 << 21)) // for debug
// #define kNormalizeAlign (1 << 7) // alignment for speculated accesses

#ifdef MY_CPU_LE_UNALIGN
#define GetUi24hi_from32(p) ((UInt32)GetUi32(p) >> 8)
#else
#define GetUi24hi_from32(p) ((p)[1] ^ ((UInt32)(p)[2] << 8) ^ ((UInt32)(p)[3] << 16))
#endif
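
/*
  note: both GetUi24hi_from32() variants return the same 24-bit value built
  from bytes p[1], p[2], p[3] (little-endian order). The MY_CPU_LE_UNALIGN
  fast path loads 4 bytes starting at p[0] with one unaligned read and drops
  the low byte; the portable path assembles the value byte by byte.
*/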
#define GetHeads_DECL(name) \
  static void GetHeads ## name(const Byte *p, UInt32 pos, \
      UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc)

#define GetHeads_LOOP(v) \
  for (; numHeads != 0; numHeads--) { \
    const UInt32 value = (v); \
    p++; \
    *heads++ = pos - hash[value]; \
    hash[value] = pos++; }

#define DEF_GetHeads2(name, v, action) \
  GetHeads_DECL(name) { action \
  GetHeads_LOOP(v) }

#define DEF_GetHeads(name, v) DEF_GetHeads2(name, v, ;)

DEF_GetHeads2(2, GetUi16(p), UNUSED_VAR(hashMask); UNUSED_VAR(crc); )
DEF_GetHeads(3, (crc[p[0]] ^ GetUi16(p + 1)) & hashMask)
DEF_GetHeads2(3b, GetUi16(p) ^ ((UInt32)(p)[2] << 16), UNUSED_VAR(hashMask); UNUSED_VAR(crc); )
// BT3 is not good for crc collisions for big hashMask values.

/*
GetHeads_DECL(3b)
{
  UNUSED_VAR(hashMask);
  UNUSED_VAR(crc);
  {
    const Byte *pLim = p + numHeads;
    if (numHeads == 0)
      return;
    pLim--;
    while (p < pLim)
    {
      UInt32 v1 = GetUi32(p);
      UInt32 v0 = v1 & 0xFFFFFF;
      UInt32 h0, h1;
      p += 2;
      v1 >>= 8;
      h0 = hash[v0];  hash[v0] = pos;  heads[0] = pos - h0;  pos++;
      h1 = hash[v1];  hash[v1] = pos;  heads[1] = pos - h1;  pos++;
      heads += 2;
    }
    if (p == pLim)
    {
      UInt32 v0 = GetUi16(p) ^ ((UInt32)(p)[2] << 16);
      *heads = pos - hash[v0];
      hash[v0] = pos;
    }
  }
}
*/

/*
GetHeads_DECL(4)
{
  unsigned sh = 0;
  UNUSED_VAR(crc)
  while ((hashMask & 0x80000000) == 0)
  {
    hashMask <<= 1;
    sh++;
  }
  GetHeads_LOOP((GetUi32(p) * 0xa54a1) >> sh)
}
#define GetHeads4b GetHeads4
*/

#define USE_GetHeads_LOCAL_CRC

#ifdef USE_GetHeads_LOCAL_CRC

GetHeads_DECL(4)
{
  UInt32 crc0[256];
  UInt32 crc1[256];
  {
    unsigned i;
    for (i = 0; i < 256; i++)
    {
      UInt32 v = crc[i];
      crc0[i] = v & hashMask;
      crc1[i] = (v << kLzHash_CrcShift_1) & hashMask;
      // crc1[i] = rotlFixed(v, 8) & hashMask;
    }
  }
  GetHeads_LOOP(crc0[p[0]] ^ crc1[p[3]] ^ (UInt32)GetUi16(p+1))
}

GetHeads_DECL(4b)
{
  UInt32 crc0[256];
  {
    unsigned i;
    for (i = 0; i < 256; i++)
      crc0[i] = crc[i] & hashMask;
  }
  GetHeads_LOOP(crc0[p[0]] ^ GetUi24hi_from32(p))
}

GetHeads_DECL(5)
{
  UInt32 crc0[256];
  UInt32 crc1[256];
  UInt32 crc2[256];
  {
    unsigned i;
    for (i = 0; i < 256; i++)
    {
      UInt32 v = crc[i];
      crc0[i] = v & hashMask;
      crc1[i] = (v << kLzHash_CrcShift_1) & hashMask;
      crc2[i] = (v << kLzHash_CrcShift_2) & hashMask;
    }
  }
  GetHeads_LOOP(crc0[p[0]] ^ crc1[p[3]] ^ crc2[p[4]] ^ (UInt32)GetUi16(p+1))
}

GetHeads_DECL(5b)
{
  UInt32 crc0[256];
  UInt32 crc1[256];
  {
    unsigned i;
    for (i = 0; i < 256; i++)
    {
      UInt32 v = crc[i];
      crc0[i] = v & hashMask;
      crc1[i] = (v << kLzHash_CrcShift_1) & hashMask;
    }
  }
  GetHeads_LOOP(crc0[p[0]] ^ crc1[p[4]] ^ GetUi24hi_from32(p))
}

#else

DEF_GetHeads(4,  (crc[p[0]] ^ (crc[p[3]] << kLzHash_CrcShift_1) ^ (UInt32)GetUi16(p+1)) & hashMask)
DEF_GetHeads(4b, (crc[p[0]] ^ GetUi24hi_from32(p)) & hashMask)
DEF_GetHeads(5,  (crc[p[0]] ^ (crc[p[3]] << kLzHash_CrcShift_1) ^ (crc[p[4]] << kLzHash_CrcShift_2) ^ (UInt32)GetUi16(p + 1)) & hashMask)
DEF_GetHeads(5b, (crc[p[0]] ^ (crc[p[4]] << kLzHash_CrcShift_1) ^ GetUi24hi_from32(p)) & hashMask)

#endif
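
/*
  note: with USE_GetHeads_LOCAL_CRC the functions above build small local
  tables once per call, with (& hashMask) and the crc shifts pre-applied,
  which removes that work from the per-byte GetHeads_LOOP. The #else branch
  computes the same hash values directly from the shared crc table on every
  iteration.
*/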
static void HashThreadFunc(CMatchFinderMt *mt)
{
  CMtSync *p = &mt->hashSync;
  PRF(printf("\nHashThreadFunc\n"));

  for (;;)
  {
    UInt32 blockIndex = 0;
    PRF(printf("\nHashThreadFunc : Event_Wait(&p->canStart)\n"));
    Event_Wait(&p->canStart);
    PRF(printf("\nHashThreadFunc : Event_Wait(&p->canStart) : after \n"));
    if (p->exit)
    {
      PRF(printf("\nHashThreadFunc : exit \n"));
      return;
    }

    MatchFinder_Init_HighHash(MF(mt));

    for (;;)
    {
      PRF(printf("Hash thread block = %d pos = %d\n", (unsigned)blockIndex, mt->MatchFinder->pos));

      {
        CMatchFinder *mf = MF(mt);
        if (MatchFinder_NeedMove(mf))
        {
          CriticalSection_Enter(&mt->btSync.cs);
          CriticalSection_Enter(&mt->hashSync.cs);
          {
            const Byte *beforePtr = Inline_MatchFinder_GetPointerToCurrentPos(mf);
            ptrdiff_t offset;
            MatchFinder_MoveBlock(mf);
            offset = beforePtr - Inline_MatchFinder_GetPointerToCurrentPos(mf);
            mt->pointerToCurPos -= offset;
            mt->buffer -= offset;
          }
          CriticalSection_Leave(&mt->hashSync.cs);
          CriticalSection_Leave(&mt->btSync.cs);
          continue;
        }

        Semaphore_Wait(&p->freeSemaphore);

        if (p->exit) // exit is unexpected here. But we check it here for some failure case
          return;

        // for faster stop : we check (p->stopWriting) after Wait(freeSemaphore)
        if (p->stopWriting)
          break;

        MatchFinder_ReadIfRequired(mf);
        {
          UInt32 *heads = mt->hashBuf + GET_HASH_BLOCK_OFFSET(blockIndex++);
          UInt32 num = Inline_MatchFinder_GetNumAvailableBytes(mf);
          heads[0] = 2;
          heads[1] = num;

          /* heads[1] contains the number of avail bytes:
             if (avail < mf->numHashBytes) :
             {
               it means that stream was finished
               HASH_THREAD and BT_THREAD must move position for heads[1] (avail) bytes.
               HASH_THREAD doesn't stop,
               HASH_THREAD fills only the header (2 numbers) for all next blocks:
                 {2, NumHashBytes - 1}, {2,0}, {2,0}, ... , {2,0}
             }
             else
             {
               HASH_THREAD and BT_THREAD must move position for (heads[0] - 2) bytes;
             }
          */

          if (num >= mf->numHashBytes)
          {
            num = num - mf->numHashBytes + 1;
            if (num > kMtHashBlockSize - 2)
              num = kMtHashBlockSize - 2;

            if (mf->pos > (UInt32)kMtMaxValForNormalize - num)
            {
              const UInt32 subValue = (mf->pos - mf->historySize - 1); // & ~(UInt32)(kNormalizeAlign - 1);
              MatchFinder_REDUCE_OFFSETS(mf, subValue)
              MatchFinder_Normalize3(subValue, mf->hash + mf->fixedHashSize, (size_t)mf->hashMask + 1);
            }

            heads[0] = 2 + num;
            mt->GetHeadsFunc(mf->buffer, mf->pos, mf->hash + mf->fixedHashSize, mf->hashMask, heads + 2, num, mf->crc);
          }

          mf->pos += num; // wrap over zero is allowed at the end of stream
          mf->buffer += num;
        }
      }

      Semaphore_Release1(&p->filledSemaphore);
    } // for() processing end

    // p->numBlocks_Sent = blockIndex;
    Event_Set(&p->wasStopped);
  } // for() thread end
}

// ---------- BT THREAD ----------

/* we use one variable instead of two (cyclicBufferPos == pos) before CyclicBuf wrap.
   here we define fixed offset of (p->pos) from (p->cyclicBufferPos) */
#define CYC_TO_POS_OFFSET 0
// #define CYC_TO_POS_OFFSET 1 // for debug

#define MFMT_GM_INLINE

#ifdef MFMT_GM_INLINE

/*
  we use size_t for (pos) instead of UInt32
  to eliminate "movsx" BUG in old MSVC x64 compiler.
*/

UInt32 * Z7_FASTCALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
    UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
    size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
    UInt32 *posRes);

#endif

static void BtGetMatches(CMatchFinderMt *p, UInt32 *d)
{
  UInt32 numProcessed = 0;
  UInt32 curPos = 2;

  /* GetMatchesSpec() functions don't create (len = 1)
     in [len, dist] match pairs, if (p->numHashBytes >= 2)
     Also we suppose here that (matchMaxLen >= 2).
     So the following code for (reserve) is not required
       UInt32 reserve = (p->matchMaxLen * 2);
       const UInt32 kNumHashBytes_Max = 5; // BT_HASH_BYTES_MAX
       if (reserve < kNumHashBytes_Max - 1)
         reserve = kNumHashBytes_Max - 1;
       const UInt32 limit = kMtBtBlockSize - (reserve);
  */

  const UInt32 limit = kMtBtBlockSize - (p->matchMaxLen * 2);

  d[1] = p->hashNumAvail;

  if (p->failure_BT)
  {
    // printf("\n == 1 BtGetMatches() p->failure_BT\n");
    d[0] = 0;
    // d[1] = 0;
    return;
  }

  while (curPos < limit)
  {
    if (p->hashBufPos == p->hashBufPosLimit)
    {
      // MatchFinderMt_GetNextBlock_Hash(p);
      UInt32 avail;
      {
        const UInt32 bi = MtSync_GetNextBlock(&p->hashSync);
        const UInt32 k = GET_HASH_BLOCK_OFFSET(bi);
        const UInt32 *h = p->hashBuf + k;
        avail = h[1];
        p->hashBufPosLimit = k + h[0];
        p->hashNumAvail = avail;
        p->hashBufPos = k + 2;
      }

      {
        /* we must prevent UInt32 overflow for avail total value,
           if avail was increased with new hash block */
        UInt32 availSum = numProcessed + avail;
        if (availSum < numProcessed)
          availSum = (UInt32)(Int32)-1;
        d[1] = availSum;
      }

      if (avail >= p->numHashBytes)
        continue;

      // if (p->hashBufPos != p->hashBufPosLimit) exit(1);

      /* (avail < p->numHashBytes)
         It means that stream was finished.
         And (avail) is the number of remaining bytes;
         we fill (d) for (avail) bytes for LZ_THREAD (receiver).
         but we don't update (p->pos) and (p->cyclicBufferPos) here in BT_THREAD */

      /* here we suppose that we have space enough:
         (kMtBtBlockSize - curPos >= p->hashNumAvail) */
      p->hashNumAvail = 0;
      d[0] = curPos + avail;
      d += curPos;
      for (; avail != 0; avail--)
        *d++ = 0;
      return;
    }
    {
      UInt32 size = p->hashBufPosLimit - p->hashBufPos;
      UInt32 pos = p->pos;
      UInt32 cyclicBufferPos = p->cyclicBufferPos;
      UInt32 lenLimit = p->matchMaxLen;
      if (lenLimit >= p->hashNumAvail)
        lenLimit = p->hashNumAvail;
      {
        UInt32 size2 = p->hashNumAvail - lenLimit + 1;
        if (size2 < size)
          size = size2;
        size2 = p->cyclicBufferSize - cyclicBufferPos;
        if (size2 < size)
          size = size2;
      }

      if (pos > (UInt32)kMtMaxValForNormalize - size)
      {
        const UInt32 subValue = (pos - p->cyclicBufferSize); // & ~(UInt32)(kNormalizeAlign - 1);
        pos -= subValue;
        p->pos = pos;
        MatchFinder_Normalize3(subValue, p->son, (size_t)p->cyclicBufferSize * 2);
      }

      #ifndef MFMT_GM_INLINE
      while (curPos < limit && size-- != 0)
      {
        UInt32 *startDistances = d + curPos;
        UInt32 num = (UInt32)(GetMatchesSpec1(lenLimit, pos - p->hashBuf[p->hashBufPos++],
            pos, p->buffer, p->son, cyclicBufferPos, p->cyclicBufferSize, p->cutValue,
            startDistances + 1, p->numHashBytes - 1) - startDistances);
        *startDistances = num - 1;
        curPos += num;
        cyclicBufferPos++;
        pos++;
        p->buffer++;
      }
      #else
      {
        UInt32 posRes = pos;
        const UInt32 *d_end;
        {
          d_end = GetMatchesSpecN_2(
              p->buffer + lenLimit - 1,
              pos, p->buffer, p->son, p->cutValue, d + curPos,
              p->numHashBytes - 1, p->hashBuf + p->hashBufPos,
              d + limit, p->hashBuf + p->hashBufPos + size,
              cyclicBufferPos, p->cyclicBufferSize,
              &posRes);
        }
        {
          if (!d_end)
          {
            // printf("\n == 2 BtGetMatches() p->failure_BT\n");
            // internal data failure
            p->failure_BT = True;
            d[0] = 0;
            // d[1] = 0;
            return;
          }
        }
        curPos = (UInt32)(d_end - d);
        {
          const UInt32 processed = posRes - pos;
          pos = posRes;
          p->hashBufPos += processed;
          cyclicBufferPos += processed;
          p->buffer += processed;
        }
      }
      #endif

      {
        const UInt32 processed = pos - p->pos;
        numProcessed += processed;
        p->hashNumAvail -= processed;
        p->pos = pos;
      }
      if (cyclicBufferPos == p->cyclicBufferSize)
        cyclicBufferPos = 0;
      p->cyclicBufferPos = cyclicBufferPos;
    }
  }

  d[0] = curPos;
}
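
/*
  note: layout of one bt block produced above, as read back by
  MatchFinderMt_GetNextBlock_Bt() and MatchFinderMt_GetMatches():
    d[0] = number of UInt32 values used in the block (header included)
    d[1] = number of bytes available at the first position of the block
  then, per input position, one record:
    count, (len, dist - 1) pairs  -- "count" UInt32 values follow (2 per pair)
  a record with count == 0 means "no tree matches at this position".
*/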
static void BtFillBlock(CMatchFinderMt *p, UInt32 globalBlockIndex)
{
  CMtSync *sync = &p->hashSync;

  BUFFER_MUST_BE_UNLOCKED(sync)

  if (!sync->needStart)
  {
    LOCK_BUFFER(sync)
  }

  BtGetMatches(p, p->btBuf + GET_BT_BLOCK_OFFSET(globalBlockIndex));

  /* We suppose that we have called GetNextBlock() from start.
     So buffer is LOCKED */

  UNLOCK_BUFFER(sync)
}

Z7_NO_INLINE
static void BtThreadFunc(CMatchFinderMt *mt)
{
  CMtSync *p = &mt->btSync;
  for (;;)
  {
    UInt32 blockIndex = 0;
    Event_Wait(&p->canStart);

    for (;;)
    {
      PRF(printf(" BT thread block = %d pos = %d\n", (unsigned)blockIndex, mt->pos));
      /* (p->exit == true) is possible after (p->canStart) at first loop iteration
         and is unexpected after more Wait(freeSemaphore) iterations */
      if (p->exit)
        return;

      Semaphore_Wait(&p->freeSemaphore);

      // for faster stop : we check (p->stopWriting) after Wait(freeSemaphore)
      if (p->stopWriting)
        break;

      BtFillBlock(mt, blockIndex++);

      Semaphore_Release1(&p->filledSemaphore);
    }

    // we stop HASH_THREAD here
    MtSync_StopWriting(&mt->hashSync);

    // p->numBlocks_Sent = blockIndex;
    Event_Set(&p->wasStopped);
  }
}

void MatchFinderMt_Construct(CMatchFinderMt *p)
{
  p->hashBuf = NULL;
  MtSync_Construct(&p->hashSync);
  MtSync_Construct(&p->btSync);
}

static void MatchFinderMt_FreeMem(CMatchFinderMt *p, ISzAllocPtr alloc)
{
  ISzAlloc_Free(alloc, p->hashBuf);
  p->hashBuf = NULL;
}

void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAllocPtr alloc)
{
  /*
     HASH_THREAD can use CriticalSection(s) btSync.cs and hashSync.cs.
     So we must be sure that HASH_THREAD will not use CriticalSection(s)
     after deleting CriticalSection here.

     we call ReleaseStream(p)
       that calls StopWriting(btSync)
         that calls StopWriting(hashSync), if it's required to stop HASH_THREAD.
     after StopWriting() it's safe to destruct MtSync(s) in any order */

  MatchFinderMt_ReleaseStream(p);

  MtSync_Destruct(&p->btSync);
  MtSync_Destruct(&p->hashSync);

  LOG_ITER(
  printf("\nTree %9d * %7d iter = %9d = sum : bytes = %9d\n",
      (UInt32)(g_NumIters_Tree / 1000),
      (UInt32)(((UInt64)g_NumIters_Loop * 1000) / (g_NumIters_Tree + 1)),
      (UInt32)(g_NumIters_Loop / 1000),
      (UInt32)(g_NumIters_Bytes / 1000)
      ));

  MatchFinderMt_FreeMem(p, alloc);
}

#define kHashBufferSize (kMtHashBlockSize * kMtHashNumBlocks)
#define kBtBufferSize (kMtBtBlockSize * kMtBtNumBlocks)
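
/*
  note: both rings live in one allocation (see MatchFinderMt_Create below):
    kHashBufferSize = 2^18 UInt32 values = 1 MiB
    kBtBufferSize   = 2^20 UInt32 values = 4 MiB
*/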
static THREAD_FUNC_DECL HashThreadFunc2(void *p) { HashThreadFunc((CMatchFinderMt *)p); return 0; }

static THREAD_FUNC_DECL BtThreadFunc2(void *p)
{
  Byte allocaDummy[0x180];
  unsigned i = 0;
  for (i = 0; i < 16; i++)
    allocaDummy[i] = (Byte)0;
  if (allocaDummy[0] == 0)
    BtThreadFunc((CMatchFinderMt *)p);
  return 0;
}
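
/*
  note: (allocaDummy) above is presumably there to shift the BT thread's
  stack frame (for example, to reduce stack-address aliasing between
  threads); the touch-and-test of allocaDummy[0] keeps the compiler from
  optimizing the buffer away.
*/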
SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore,
    UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAllocPtr alloc)
{
  CMatchFinder *mf = MF(p);
  p->historySize = historySize;
  if (kMtBtBlockSize <= matchMaxLen * 4)
    return SZ_ERROR_PARAM;
  if (!p->hashBuf)
  {
    p->hashBuf = (UInt32 *)ISzAlloc_Alloc(alloc, ((size_t)kHashBufferSize + (size_t)kBtBufferSize) * sizeof(UInt32));
    if (!p->hashBuf)
      return SZ_ERROR_MEM;
    p->btBuf = p->hashBuf + kHashBufferSize;
  }
  keepAddBufferBefore += (kHashBufferSize + kBtBufferSize);
  keepAddBufferAfter += kMtHashBlockSize;
  if (!MatchFinder_Create(mf, historySize, keepAddBufferBefore, matchMaxLen, keepAddBufferAfter, alloc))
    return SZ_ERROR_MEM;

  RINOK(MtSync_Create(&p->hashSync, HashThreadFunc2, p))
  RINOK(MtSync_Create(&p->btSync, BtThreadFunc2, p))
  return SZ_OK;
}

SRes MatchFinderMt_InitMt(CMatchFinderMt *p)
{
  RINOK(MtSync_Init(&p->hashSync, kMtHashNumBlocks))
  return MtSync_Init(&p->btSync, kMtBtNumBlocks);
}

static void MatchFinderMt_Init(void *_p)
{
  CMatchFinderMt *p = (CMatchFinderMt *)_p;
  CMatchFinder *mf = MF(p);

  p->btBufPos =
  p->btBufPosLimit = NULL;
  p->hashBufPos =
  p->hashBufPosLimit = 0;
  p->hashNumAvail = 0; // 21.03

  p->failure_BT = False;

  /* Init without data reading. We don't want to read data in this thread */
  MatchFinder_Init_4(mf);

  MatchFinder_Init_LowHash(mf);

  p->pointerToCurPos = Inline_MatchFinder_GetPointerToCurrentPos(mf);
  p->btNumAvailBytes = 0;
  p->failure_LZ_BT = False;
  // p->failure_LZ_LZ = False;

  p->lzPos =
      1; // optimal smallest value
      // 0; // for debug: ignores match to start
      // kNormalizeAlign; // for debug

  p->hash = mf->hash;
  p->fixedHashSize = mf->fixedHashSize;
  // p->hash4Mask = mf->hash4Mask;
  p->crc = mf->crc;
  // memcpy(p->crc, mf->crc, sizeof(mf->crc));
  p->son = mf->son;
  p->matchMaxLen = mf->matchMaxLen;
  p->numHashBytes = mf->numHashBytes;

  /* (mf->pos) and (mf->streamPos) were already initialized to 1 in MatchFinder_Init_4() */
  // mf->streamPos = mf->pos = 1; // optimal smallest value
      // 0; // for debug: ignores match to start
      // kNormalizeAlign; // for debug

  /* we must init (p->pos = mf->pos) for BT, because
     BT code needs (p->pos == delta_value_for_empty_hash_record == mf->pos) */
  p->pos = mf->pos; // do not change it
  p->cyclicBufferPos = (p->pos - CYC_TO_POS_OFFSET);
  p->cyclicBufferSize = mf->cyclicBufferSize;
  p->buffer = mf->buffer;
  p->cutValue = mf->cutValue;
  // p->son[0] = p->son[1] = 0; // unused: to init skipped record for speculated accesses.
}

/* ReleaseStream is required to finish multithreading */
void MatchFinderMt_ReleaseStream(CMatchFinderMt *p)
{
  // Sleep(1); // for debug
  MtSync_StopWriting(&p->btSync);
  // Sleep(200); // for debug
  /* p->MatchFinder->ReleaseStream(); */
}

Z7_NO_INLINE
static UInt32 MatchFinderMt_GetNextBlock_Bt(CMatchFinderMt *p)
{
  if (p->failure_LZ_BT)
    p->btBufPos = p->failureBuf;
  else
  {
    const UInt32 bi = MtSync_GetNextBlock(&p->btSync);
    const UInt32 *bt = p->btBuf + GET_BT_BLOCK_OFFSET(bi);
    {
      const UInt32 numItems = bt[0];
      p->btBufPosLimit = bt + numItems;
      p->btNumAvailBytes = bt[1];
      p->btBufPos = bt + 2;
      if (numItems < 2 || numItems > kMtBtBlockSize)
      {
        p->failureBuf[0] = 0;
        p->btBufPos = p->failureBuf;
        p->btBufPosLimit = p->failureBuf + 1;
        p->failure_LZ_BT = True;
        // p->btNumAvailBytes = 0;
        /* we don't want to decrease AvailBytes that was loaded before;
           that can be unexpected for the code that has loaded another value before */
      }
    }

    if (p->lzPos >= (UInt32)kMtMaxValForNormalize - (UInt32)kMtBtBlockSize)
    {
      /* we don't check (lzPos) over exact avail bytes in (btBuf).
         (fixedHashSize) is small, so normalization is fast */
      const UInt32 subValue = (p->lzPos - p->historySize - 1); // & ~(UInt32)(kNormalizeAlign - 1);
      p->lzPos -= subValue;
      MatchFinder_Normalize3(subValue, p->hash, p->fixedHashSize);
    }
  }
  return p->btNumAvailBytes;
}

static const Byte * MatchFinderMt_GetPointerToCurrentPos(void *_p)
{
  CMatchFinderMt *p = (CMatchFinderMt *)_p;
  return p->pointerToCurPos;
}

#define GET_NEXT_BLOCK_IF_REQUIRED if (p->btBufPos == p->btBufPosLimit) MatchFinderMt_GetNextBlock_Bt(p);

static UInt32 MatchFinderMt_GetNumAvailableBytes(void *_p)
{
  CMatchFinderMt *p = (CMatchFinderMt *)_p;
  if (p->btBufPos != p->btBufPosLimit)
    return p->btNumAvailBytes;
  return MatchFinderMt_GetNextBlock_Bt(p);
}

// #define CHECK_FAILURE_LZ(_match_, _pos_) if (_match_ >= _pos_) { p->failure_LZ_LZ = True; return d; }
#define CHECK_FAILURE_LZ(_match_, _pos_)

static UInt32 * MixMatches2(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *d)
{
  UInt32 h2, c2;
  UInt32 *hash = p->hash;
  const Byte *cur = p->pointerToCurPos;
  const UInt32 m = p->lzPos;
  MT_HASH2_CALC

  c2 = hash[h2];
  hash[h2] = m;

  if (c2 >= matchMinPos)
  {
    CHECK_FAILURE_LZ(c2, m)
    if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
    {
      *d++ = 2;
      *d++ = m - c2 - 1;
    }
  }

  return d;
}
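
/*
  note: the MixMatches*() helpers insert short hash-table matches (len 2..4)
  in front of the binary-tree matches. Pairs are written to (d) as
  (len, dist - 1) -- (m - cN - 1) is the distance minus 1, matching the
  encoding of the bt records -- and a candidate position cN is accepted only
  if (cN >= matchMinPos).
*/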
static UInt32 * MixMatches3(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *d)
{
  UInt32 h2, h3, c2, c3;
  UInt32 *hash = p->hash;
  const Byte *cur = p->pointerToCurPos;
  const UInt32 m = p->lzPos;
  MT_HASH3_CALC

  c2 = hash[h2];
  c3 = (hash + kFix3HashSize)[h3];

  hash[h2] = m;
  (hash + kFix3HashSize)[h3] = m;

  if (c2 >= matchMinPos)
  {
    CHECK_FAILURE_LZ(c2, m)
    if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
    {
      d[1] = m - c2 - 1;
      if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 2] == cur[2])
      {
        d[0] = 3;
        return d + 2;
      }
      d[0] = 2;
      d += 2;
    }
  }

  if (c3 >= matchMinPos)
  {
    CHECK_FAILURE_LZ(c3, m)
    if (cur[(ptrdiff_t)c3 - (ptrdiff_t)m] == cur[0])
    {
      *d++ = 3;
      *d++ = m - c3 - 1;
    }
  }

  return d;
}

#define INCREASE_LZ_POS p->lzPos++; p->pointerToCurPos++;

/*
static
UInt32* MatchFinderMt_GetMatches_Bt4(CMatchFinderMt *p, UInt32 *d)
{
  const UInt32 *bt = p->btBufPos;
  const UInt32 len = *bt++;
  const UInt32 *btLim = bt + len;
  UInt32 matchMinPos;
  UInt32 avail = p->btNumAvailBytes - 1;
  p->btBufPos = btLim;

  {
    p->btNumAvailBytes = avail;

    #define BT_HASH_BYTES_MAX 5

    matchMinPos = p->lzPos;

    if (len != 0)
      matchMinPos -= bt[1];
    else if (avail < (BT_HASH_BYTES_MAX - 1) - 1)
    {
      INCREASE_LZ_POS
      return d;
    }
    else
    {
      const UInt32 hs = p->historySize;
      if (matchMinPos > hs)
        matchMinPos -= hs;
      else
        matchMinPos = 1;
    }
  }

  for (;;)
  {
    UInt32 h2, h3, c2, c3;
    UInt32 *hash = p->hash;
    const Byte *cur = p->pointerToCurPos;
    UInt32 m = p->lzPos;
    MT_HASH3_CALC

    c2 = hash[h2];
    c3 = (hash + kFix3HashSize)[h3];

    hash[h2] = m;
    (hash + kFix3HashSize)[h3] = m;

    if (c2 >= matchMinPos && cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
    {
      d[1] = m - c2 - 1;
      if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 2] == cur[2])
      {
        d[0] = 3;
        d += 2;
        break;
      }
      // else
      {
        d[0] = 2;
        d += 2;
      }
    }

    if (c3 >= matchMinPos && cur[(ptrdiff_t)c3 - (ptrdiff_t)m] == cur[0])
    {
      *d++ = 3;
      *d++ = m - c3 - 1;
    }

    break;
  }

  if (len != 0)
  {
    do
    {
      const UInt32 v0 = bt[0];
      const UInt32 v1 = bt[1];
      bt += 2;
      d[0] = v0;
      d[1] = v1;
      d += 2;
    }
    while (bt != btLim);
  }
  INCREASE_LZ_POS
  return d;
}
*/

static UInt32 * MixMatches4(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *d)
{
  UInt32 h2, h3, /* h4, */ c2, c3 /* , c4 */;
  UInt32 *hash = p->hash;
  const Byte *cur = p->pointerToCurPos;
  const UInt32 m = p->lzPos;
  MT_HASH3_CALC
  // MT_HASH4_CALC

  c2 = hash[h2];
  c3 = (hash + kFix3HashSize)[h3];
  // c4 = (hash + kFix4HashSize)[h4];

  hash[h2] = m;
  (hash + kFix3HashSize)[h3] = m;
  // (hash + kFix4HashSize)[h4] = m;

  // #define BT5_USE_H2
  // #ifdef BT5_USE_H2
  if (c2 >= matchMinPos && cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
  {
    d[1] = m - c2 - 1;
    if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 2] == cur[2])
    {
      // d[0] = (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 3] == cur[3]) ? 4 : 3;
      // return d + 2;

      if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 3] == cur[3])
      {
        d[0] = 4;
        return d + 2;
      }

      d[0] = 3;
      d += 2;

      #ifdef BT5_USE_H4
      if (c4 >= matchMinPos)
        if (
          cur[(ptrdiff_t)c4 - (ptrdiff_t)m]     == cur[0] &&
          cur[(ptrdiff_t)c4 - (ptrdiff_t)m + 3] == cur[3]
          )
        {
          *d++ = 4;
          *d++ = m - c4 - 1;
        }
      #endif

      return d;
    }
    d[0] = 2;
    d += 2;
  }
  // #endif

  if (c3 >= matchMinPos && cur[(ptrdiff_t)c3 - (ptrdiff_t)m] == cur[0])
  {
    d[1] = m - c3 - 1;
    if (cur[(ptrdiff_t)c3 - (ptrdiff_t)m + 3] == cur[3])
    {
      d[0] = 4;
      return d + 2;
    }
    d[0] = 3;
    d += 2;
  }

  #ifdef BT5_USE_H4
  if (c4 >= matchMinPos)
    if (
      cur[(ptrdiff_t)c4 - (ptrdiff_t)m]     == cur[0] &&
      cur[(ptrdiff_t)c4 - (ptrdiff_t)m + 3] == cur[3]
      )
    {
      *d++ = 4;
      *d++ = m - c4 - 1;
    }
  #endif

  return d;
}

static UInt32 * MatchFinderMt2_GetMatches(void *_p, UInt32 *d)
{
  CMatchFinderMt *p = (CMatchFinderMt *)_p;
  const UInt32 *bt = p->btBufPos;
  const UInt32 len = *bt++;
  const UInt32 *btLim = bt + len;
  p->btBufPos = btLim;
  p->btNumAvailBytes--;
  INCREASE_LZ_POS
  {
    while (bt != btLim)
    {
      const UInt32 v0 = bt[0];
      const UInt32 v1 = bt[1];
      bt += 2;
      d[0] = v0;
      d[1] = v1;
      d += 2;
    }
  }
  return d;
}

static UInt32 * MatchFinderMt_GetMatches(void *_p, UInt32 *d)
{
  CMatchFinderMt *p = (CMatchFinderMt *)_p;
  const UInt32 *bt = p->btBufPos;
  UInt32 len = *bt++;
  const UInt32 avail = p->btNumAvailBytes - 1;
  p->btNumAvailBytes = avail;
  p->btBufPos = bt + len;
  if (len == 0)
  {
    #define BT_HASH_BYTES_MAX 5
    if (avail >= (BT_HASH_BYTES_MAX - 1) - 1)
    {
      UInt32 m = p->lzPos;
      if (m > p->historySize)
        m -= p->historySize;
      else
        m = 1;
      d = p->MixMatchesFunc(p, m, d);
    }
  }
  else
  {
    /*
      first match pair from BinTree: (match_len, match_dist),
      (match_len >= numHashBytes).
      MixMatchesFunc() inserts only hash matches that are nearer than (match_dist)
    */
    d = p->MixMatchesFunc(p, p->lzPos - bt[1], d);
    // if (d) // check for failure
    do
    {
      const UInt32 v0 = bt[0];
      const UInt32 v1 = bt[1];
      bt += 2;
      d[0] = v0;
      d[1] = v1;
      d += 2;
    }
    while (len -= 2);
  }
  INCREASE_LZ_POS
  return d;
}

#define SKIP_HEADER2_MT  do { GET_NEXT_BLOCK_IF_REQUIRED
#define SKIP_HEADER_MT(n) SKIP_HEADER2_MT if (p->btNumAvailBytes-- >= (n)) { const Byte *cur = p->pointerToCurPos; UInt32 *hash = p->hash;
#define SKIP_FOOTER_MT } INCREASE_LZ_POS p->btBufPos += (size_t)*p->btBufPos + 1; } while (--num != 0);
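
/*
  note: for example, MatchFinderMt2_Skip() below expands to roughly:
    do {
      if (p->btBufPos == p->btBufPosLimit) MatchFinderMt_GetNextBlock_Bt(p);
      if (p->btNumAvailBytes-- >= 2) {
        const Byte *cur = p->pointerToCurPos;
        UInt32 *hash = p->hash;
        UInt32 h2;
        MT_HASH2_CALC
        hash[h2] = p->lzPos;
      }
      INCREASE_LZ_POS
      p->btBufPos += (size_t)*p->btBufPos + 1; // skip this position's bt record
    } while (--num != 0);
*/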
static void MatchFinderMt0_Skip(void *_p, UInt32 num)
{
  CMatchFinderMt *p = (CMatchFinderMt *)_p;
  SKIP_HEADER2_MT { p->btNumAvailBytes--;
  SKIP_FOOTER_MT
}

static void MatchFinderMt2_Skip(void *_p, UInt32 num)
{
  CMatchFinderMt *p = (CMatchFinderMt *)_p;
  SKIP_HEADER_MT(2)
      UInt32 h2;
      MT_HASH2_CALC
      hash[h2] = p->lzPos;
  SKIP_FOOTER_MT
}

static void MatchFinderMt3_Skip(void *_p, UInt32 num)
{
  CMatchFinderMt *p = (CMatchFinderMt *)_p;
  SKIP_HEADER_MT(3)
      UInt32 h2, h3;
      MT_HASH3_CALC
      (hash + kFix3HashSize)[h3] =
      hash[                h2] =
        p->lzPos;
  SKIP_FOOTER_MT
}

/*
// MatchFinderMt4_Skip() is similar to MatchFinderMt3_Skip().
// The difference is that MatchFinderMt3_Skip() updates hash for last 3 bytes of stream.
static void MatchFinderMt4_Skip(CMatchFinderMt *p, UInt32 num)
{
  SKIP_HEADER_MT(4)
      UInt32 h2, h3; // h4
      MT_HASH3_CALC
      // MT_HASH4_CALC
      // (hash + kFix4HashSize)[h4] =
      (hash + kFix3HashSize)[h3] =
      hash[                h2] =
        p->lzPos;
  SKIP_FOOTER_MT
}
*/

void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder2 *vTable)
{
  vTable->Init = MatchFinderMt_Init;
  vTable->GetNumAvailableBytes = MatchFinderMt_GetNumAvailableBytes;
  vTable->GetPointerToCurrentPos = MatchFinderMt_GetPointerToCurrentPos;
  vTable->GetMatches = MatchFinderMt_GetMatches;

  switch (MF(p)->numHashBytes)
  {
    case 2:
      p->GetHeadsFunc = GetHeads2;
      p->MixMatchesFunc = NULL;
      vTable->Skip = MatchFinderMt0_Skip;
      vTable->GetMatches = MatchFinderMt2_GetMatches;
      break;
    case 3:
      p->GetHeadsFunc = MF(p)->bigHash ? GetHeads3b : GetHeads3;
      p->MixMatchesFunc = MixMatches2;
      vTable->Skip = MatchFinderMt2_Skip;
      break;
    case 4:
      p->GetHeadsFunc = MF(p)->bigHash ? GetHeads4b : GetHeads4;
      // it's fast inline version of GetMatches()
      // vTable->GetMatches = MatchFinderMt_GetMatches_Bt4;
      p->MixMatchesFunc = MixMatches3;
      vTable->Skip = MatchFinderMt3_Skip;
      break;
    default:
      p->GetHeadsFunc = MF(p)->bigHash ? GetHeads5b : GetHeads5;
      p->MixMatchesFunc = MixMatches4;
      vTable->Skip =
          MatchFinderMt3_Skip;
          // MatchFinderMt4_Skip;
      break;
  }
}

#undef RINOK_THREAD
#undef PRF
#undef MF
#undef GetUi24hi_from32
#undef LOCK_BUFFER
#undef UNLOCK_BUFFER