/* Copyright 2010 Google Inc. All Rights Reserved.
   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* A (forgetful) hash table of the data seen by the compressor, used to
   help create backward references to previous data. */

#ifndef BROTLI_ENC_HASH_H_
#define BROTLI_ENC_HASH_H_

#include <assert.h>  /* assert */
#include <string.h>  /* memcmp, memset */

#include "../common/dictionary.h"
#include "../common/types.h"
#include "./dictionary_hash.h"
#include "./fast_log.h"
#include "./find_match_length.h"
#include "./memory.h"
#include "./port.h"
#include "./static_dict.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#define MAX_TREE_SEARCH_DEPTH 64
#define MAX_TREE_COMP_LENGTH 128

static const uint32_t kDistanceCacheIndex[] = {
  0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
};
static const int kDistanceCacheOffset[] = {
  0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3
};
static const uint32_t kCutoffTransformsCount = 10;
static const uint8_t kCutoffTransforms[] = {
  0, 12, 27, 23, 42, 63, 56, 48, 59, 64
};

/* kHashMul32 multiplier has these properties:
   * The multiplier must be odd. Otherwise we may lose the highest bit.
   * No long streaks of 1s or 0s.
   * There is no effort to ensure that it is a prime, the oddity is enough
     for this use.
   * The number has been tuned heuristically against compression benchmarks. */
static const uint32_t kHashMul32 = 0x1e35a7bd;

static BROTLI_INLINE uint32_t Hash14(const uint8_t* data) {
  uint32_t h = BROTLI_UNALIGNED_LOAD32(data) * kHashMul32;
  /* The higher bits contain more mixture from the multiplication,
     so we take our results from there. */
  return h >> (32 - 14);
}
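
/* A minimal usage sketch (hypothetical helper, not used by the encoder):
   a hash like Hash14 is typically consumed by writing the most recent
   position of each 4-byte sequence into a table of 1 << 14 entries, so a
   later occurrence of the same hash can be probed as a match candidate. */
static BROTLI_INLINE void ExampleFillHash14Table(
    const uint8_t* data, size_t size, uint32_t* table /* 1 << 14 entries */) {
  size_t i;
  for (i = 0; i + 4 <= size; ++i) {
    table[Hash14(&data[i])] = (uint32_t)i;  /* remember the latest position */
  }
}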

/* Usually we choose the longest backward reference; this function allows
   exceptions to that rule.

   If we choose a backward reference that is further away, it will
   usually be coded with more bits. We approximate this by assuming
   log2(distance). If the distance can be expressed in terms of the
   last four distances, we use some heuristic constants to estimate
   the bit cost. For the first up to four literals we use the bit
   cost of the literals from the literal cost model, after that we
   use the average bit cost of the cost model.

   This function is used to sometimes discard a longer backward reference
   when it is not much longer and the bit cost for encoding it is more
   than the saved literals.

   backward_reference_offset MUST be positive. */
static BROTLI_INLINE double BackwardReferenceScore(
    size_t copy_length, size_t backward_reference_offset) {
  return 5.4 * (double)copy_length -
      1.20 * Log2FloorNonZero(backward_reference_offset);
}

static BROTLI_INLINE double BackwardReferenceScoreUsingLastDistance(
    size_t copy_length, size_t distance_short_code) {
  static const double kDistanceShortCodeBitCost[16] = {
    -0.6, 0.95, 1.17, 1.27,
    0.93, 0.93, 0.96, 0.96, 0.99, 0.99,
    1.05, 1.05, 1.15, 1.15, 1.25, 1.25
  };
  return 5.4 * (double)copy_length -
      kDistanceShortCodeBitCost[distance_short_code];
}
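
/* Worked example of the trade-off encoded by BackwardReferenceScore:
   a length-10 match at distance 64 scores 5.4 * 10 - 1.2 * 6 = 46.8, while
   a length-11 match at distance 2^20 scores 5.4 * 11 - 1.2 * 20 = 35.4.
   The extra copy byte is worth only 5.4 points, but each extra bit of
   distance costs 1.2, so the nearer, shorter match wins here. */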

typedef struct BackwardMatch {
  uint32_t distance;
  uint32_t length_and_code;
} BackwardMatch;

static BROTLI_INLINE void InitBackwardMatch(BackwardMatch* self,
    size_t dist, size_t len) {
  self->distance = (uint32_t)dist;
  self->length_and_code = (uint32_t)(len << 5);
}

static BROTLI_INLINE void InitDictionaryBackwardMatch(BackwardMatch* self,
    size_t dist, size_t len, size_t len_code) {
  self->distance = (uint32_t)dist;
  self->length_and_code =
      (uint32_t)((len << 5) | (len == len_code ? 0 : len_code));
}

static BROTLI_INLINE size_t BackwardMatchLength(const BackwardMatch* self) {
  return self->length_and_code >> 5;
}

static BROTLI_INLINE size_t BackwardMatchLengthCode(const BackwardMatch* self) {
  size_t code = self->length_and_code & 31;
  return code ? code : BackwardMatchLength(self);
}
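
/* Packing example: a dictionary match of copied length 8 that is encoded
   with length code 11 stores length_and_code = (8 << 5) | 11 = 267.
   BackwardMatchLength then recovers 267 >> 5 = 8 and
   BackwardMatchLengthCode recovers 267 & 31 = 11; a zero low field (as
   written by InitBackwardMatch) means "the code equals the length". */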

#define EXPAND_CAT(a, b) CAT(a, b)
#define CAT(a, b) a ## b
#define FN(X) EXPAND_CAT(X, HASHER())

#define MAX_NUM_MATCHES_H10 (64 + MAX_TREE_SEARCH_DEPTH)

#define HASHER() H10
#define HashToBinaryTree HASHER()

#define BUCKET_BITS 17
#define BUCKET_SIZE (1 << BUCKET_BITS)
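
/* Expansion example: with HASHER() defined as H10, FN(HashBytes) expands via
   EXPAND_CAT to HashBytesH10, and the HashToBinaryTree struct below is
   really named H10. The same FN trick stamps out the H2-H9 hashers from the
   shared templates included further down in this file. */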

static size_t FN(HashTypeLength)(void) { return 4; }
static size_t FN(StoreLookahead)(void) { return MAX_TREE_COMP_LENGTH; }

static uint32_t FN(HashBytes)(const uint8_t *data) {
  uint32_t h = BROTLI_UNALIGNED_LOAD32(data) * kHashMul32;
  /* The higher bits contain more mixture from the multiplication,
     so we take our results from there. */
  return h >> (32 - BUCKET_BITS);
}

/* A (forgetful) hash table where each hash bucket contains a binary tree of
   sequences whose first 4 bytes share the same hash code.
   Each sequence is MAX_TREE_COMP_LENGTH long and is identified by its starting
   position in the input data. The binary tree is sorted by the lexicographic
   order of the sequences, and it is also a max-heap with respect to the
   starting positions. */
typedef struct HashToBinaryTree {
  /* The window size minus 1 */
  size_t window_mask_;

  /* Hash table that maps the 4-byte hashes of the sequence to the last
     position where this hash was found, which is the root of the binary
     tree of sequences that share this hash bucket. */
  uint32_t buckets_[BUCKET_SIZE];

  /* The union of the binary trees of each hash bucket. The root of the tree
     corresponding to a hash is a sequence starting at buckets_[hash] and
     the left and right children of a sequence starting at pos are
     forest_[2 * pos] and forest_[2 * pos + 1]. */
  uint32_t* forest_;

  /* A position used to mark a non-existent sequence, i.e. a tree is empty if
     its root is at invalid_pos_ and a node is a leaf if both its children
     are at invalid_pos_. */
  uint32_t invalid_pos_;

  int is_dirty_;
} HashToBinaryTree;
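
/* Size sketch: buckets_ alone is (1 << 17) * sizeof(uint32_t) = 512 KiB, and
   forest_ is allocated by Init below as 2 * num_nodes uint32 entries, i.e.
   8 bytes per window position; for an assumed lgwin = 22, a full 4 Mi window
   needs 2 * 4 Mi * 4 bytes = 32 MiB of forest. The children of the node for
   position pos live at forest_[2 * (pos & window_mask_)] and
   forest_[2 * (pos & window_mask_) + 1], as computed by the child-index
   helpers below. */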

static void FN(Reset)(HashToBinaryTree* self) {
  self->is_dirty_ = 1;
}

static void FN(Initialize)(HashToBinaryTree* self) {
  self->forest_ = NULL;
  FN(Reset)(self);
}

static void FN(Cleanup)(MemoryManager* m, HashToBinaryTree* self) {
  BROTLI_FREE(m, self->forest_);
}

static void FN(Init)(
    MemoryManager* m, HashToBinaryTree* self, const uint8_t* data, int lgwin,
    size_t position, size_t bytes, int is_last) {
  if (self->is_dirty_) {
    uint32_t invalid_pos;
    size_t num_nodes;
    uint32_t i;
    BROTLI_UNUSED(data);
    self->window_mask_ = (1u << lgwin) - 1u;
    invalid_pos = (uint32_t)(0 - self->window_mask_);
    self->invalid_pos_ = invalid_pos;
    for (i = 0; i < BUCKET_SIZE; i++) {
      self->buckets_[i] = invalid_pos;
    }
    num_nodes = (position == 0 && is_last) ? bytes : self->window_mask_ + 1;
    self->forest_ = BROTLI_ALLOC(m, uint32_t, 2 * num_nodes);
    if (BROTLI_IS_OOM(m)) return;
    /* Mark clean only after the allocation succeeded; on OOM the hasher
       stays dirty, so a later Init can retry. */
    self->is_dirty_ = 0;
  }
}

static BROTLI_INLINE size_t FN(LeftChildIndex)(HashToBinaryTree* self,
    const size_t pos) {
  return 2 * (pos & self->window_mask_);
}

static BROTLI_INLINE size_t FN(RightChildIndex)(HashToBinaryTree* self,
    const size_t pos) {
  return 2 * (pos & self->window_mask_) + 1;
}

/* Stores the hash of the next 4 bytes and in a single tree-traversal, the
   hash bucket's binary tree is searched for matches and is re-rooted at the
   current position.

   If less than MAX_TREE_COMP_LENGTH data is available, the hash bucket of the
   current position is searched for matches, but the state of the hash table
   is not changed, since we can not know the final sorting order of the
   current (incomplete) sequence.

   This function must be called with increasing cur_ix positions. */
static BROTLI_INLINE BackwardMatch* FN(StoreAndFindMatches)(
    HashToBinaryTree* self, const uint8_t* const BROTLI_RESTRICT data,
    const size_t cur_ix, const size_t ring_buffer_mask, const size_t max_length,
    const size_t max_backward, size_t* const BROTLI_RESTRICT best_len,
    BackwardMatch* BROTLI_RESTRICT matches) {
  const size_t cur_ix_masked = cur_ix & ring_buffer_mask;
  const size_t max_comp_len =
      BROTLI_MIN(size_t, max_length, MAX_TREE_COMP_LENGTH);
  const int should_reroot_tree = (max_length >= MAX_TREE_COMP_LENGTH) ? 1 : 0;
  const uint32_t key = FN(HashBytes)(&data[cur_ix_masked]);
  size_t prev_ix = self->buckets_[key];
  /* The forest index of the rightmost node of the left subtree of the new
     root, updated as we traverse and reroot the tree of the hash bucket. */
  size_t node_left = FN(LeftChildIndex)(self, cur_ix);
  /* The forest index of the leftmost node of the right subtree of the new
     root, updated as we traverse and reroot the tree of the hash bucket. */
  size_t node_right = FN(RightChildIndex)(self, cur_ix);
  /* The match length of the rightmost node of the left subtree of the new
     root, updated as we traverse and reroot the tree of the hash bucket. */
  size_t best_len_left = 0;
  /* The match length of the leftmost node of the right subtree of the new
     root, updated as we traverse and reroot the tree of the hash bucket. */
  size_t best_len_right = 0;
  size_t depth_remaining;
  if (should_reroot_tree) {
    self->buckets_[key] = (uint32_t)cur_ix;
  }
  for (depth_remaining = MAX_TREE_SEARCH_DEPTH; ; --depth_remaining) {
    const size_t backward = cur_ix - prev_ix;
    const size_t prev_ix_masked = prev_ix & ring_buffer_mask;
    if (backward == 0 || backward > max_backward || depth_remaining == 0) {
      if (should_reroot_tree) {
        self->forest_[node_left] = self->invalid_pos_;
        self->forest_[node_right] = self->invalid_pos_;
      }
      break;
    }
    {
      const size_t cur_len = BROTLI_MIN(size_t, best_len_left, best_len_right);
      size_t len;
      assert(cur_len <= MAX_TREE_COMP_LENGTH);
      len = cur_len +
          FindMatchLengthWithLimit(&data[cur_ix_masked + cur_len],
                                   &data[prev_ix_masked + cur_len],
                                   max_length - cur_len);
      assert(0 == memcmp(&data[cur_ix_masked], &data[prev_ix_masked], len));
      if (matches && len > *best_len) {
        *best_len = len;
        InitBackwardMatch(matches++, backward, len);
      }
      if (len >= max_comp_len) {
        if (should_reroot_tree) {
          self->forest_[node_left] =
              self->forest_[FN(LeftChildIndex)(self, prev_ix)];
          self->forest_[node_right] =
              self->forest_[FN(RightChildIndex)(self, prev_ix)];
        }
        break;
      }
      if (data[cur_ix_masked + len] > data[prev_ix_masked + len]) {
        best_len_left = len;
        if (should_reroot_tree) {
          self->forest_[node_left] = (uint32_t)prev_ix;
        }
        node_left = FN(RightChildIndex)(self, prev_ix);
        prev_ix = self->forest_[node_left];
      } else {
        best_len_right = len;
        if (should_reroot_tree) {
          self->forest_[node_right] = (uint32_t)prev_ix;
        }
        node_right = FN(LeftChildIndex)(self, prev_ix);
        prev_ix = self->forest_[node_right];
      }
    }
  }
  return matches;
}
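
/* Note on the return value: matches is advanced past every match written, so
   a caller passing a non-NULL array recovers the count as
   (returned pointer - original pointer); FindAllMatches below relies on
   exactly this. Passing matches == NULL (as Store does) performs only the
   re-rooting side effect. */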

/* Finds all backward matches of &data[cur_ix & ring_buffer_mask] up to the
   length of max_length and stores the position cur_ix in the hash table.

   Returns the number of matches found, and stores the found matches in
   matches[0] to matches[num_matches - 1]. The matches will be sorted by
   strictly increasing length and (non-strictly) increasing distance. */
static BROTLI_INLINE size_t FN(FindAllMatches)(HashToBinaryTree* self,
    const uint8_t* data, const size_t ring_buffer_mask, const size_t cur_ix,
    const size_t max_length, const size_t max_backward, const int quality,
    BackwardMatch* matches) {
  BackwardMatch* const orig_matches = matches;
  const size_t cur_ix_masked = cur_ix & ring_buffer_mask;
  size_t best_len = 1;
  const size_t short_match_max_backward = quality <= 10 ? 16 : 64;
  size_t stop = cur_ix - short_match_max_backward;
  uint32_t dict_matches[BROTLI_MAX_STATIC_DICTIONARY_MATCH_LEN + 1];
  size_t i;
  if (cur_ix < short_match_max_backward) { stop = 0; }
  for (i = cur_ix - 1; i > stop && best_len <= 2; --i) {
    size_t prev_ix = i;
    const size_t backward = cur_ix - prev_ix;
    if (PREDICT_FALSE(backward > max_backward)) {
      break;
    }
    prev_ix &= ring_buffer_mask;
    if (data[cur_ix_masked] != data[prev_ix] ||
        data[cur_ix_masked + 1] != data[prev_ix + 1]) {
      continue;
    }
    {
      const size_t len =
          FindMatchLengthWithLimit(&data[prev_ix], &data[cur_ix_masked],
                                   max_length);
      if (len > best_len) {
        best_len = len;
        InitBackwardMatch(matches++, backward, len);
      }
    }
  }
  if (best_len < max_length) {
    matches = FN(StoreAndFindMatches)(self, data, cur_ix, ring_buffer_mask,
        max_length, max_backward, &best_len, matches);
  }
  for (i = 0; i <= BROTLI_MAX_STATIC_DICTIONARY_MATCH_LEN; ++i) {
    dict_matches[i] = kInvalidMatch;
  }
  {
    size_t minlen = BROTLI_MAX(size_t, 4, best_len + 1);
    if (BrotliFindAllStaticDictionaryMatches(&data[cur_ix_masked], minlen,
                                             max_length, &dict_matches[0])) {
      size_t maxlen = BROTLI_MIN(
          size_t, BROTLI_MAX_STATIC_DICTIONARY_MATCH_LEN, max_length);
      size_t l;
      for (l = minlen; l <= maxlen; ++l) {
        uint32_t dict_id = dict_matches[l];
        if (dict_id < kInvalidMatch) {
          InitDictionaryBackwardMatch(matches++,
              max_backward + (dict_id >> 5) + 1, l, dict_id & 31);
        }
      }
    }
  }
  return (size_t)(matches - orig_matches);
}
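
/* Dictionary distance example: dict_id packs a dictionary word/transform
   identifier in its high 27 bits and a length code in its low 5 bits. A hit
   with, say, dict_id >> 5 == 3 and max_backward == 65520 becomes a match at
   distance 65520 + 3 + 1 = 65524, i.e. just beyond the regular window, which
   is how static-dictionary references are told apart from in-window copies. */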

/* Stores the hash of the next 4 bytes and re-roots the binary tree at the
   current sequence, without returning any matches.
   REQUIRES: ix + MAX_TREE_COMP_LENGTH <= end-of-current-block */
static BROTLI_INLINE void FN(Store)(HashToBinaryTree* self, const uint8_t *data,
    const size_t mask, const size_t ix) {
  /* Maximum distance is window size - 16, see section 9.1. of the spec. */
  const size_t max_backward = self->window_mask_ - 15;
  FN(StoreAndFindMatches)(self, data, ix, mask, MAX_TREE_COMP_LENGTH,
      max_backward, NULL, NULL);
}
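
/* For example (assumed lgwin = 16): window_mask_ = 65535, so matches found
   here may reach back at most 65535 - 15 = 65520 bytes, matching the
   "window size - 16" limit of section 9.1 of the spec. */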

static BROTLI_INLINE void FN(StoreRange)(HashToBinaryTree* self,
    const uint8_t *data, const size_t mask, const size_t ix_start,
    const size_t ix_end) {
  size_t i = ix_start + 63 <= ix_end ? ix_end - 63 : ix_start;
  for (; i < ix_end; ++i) {
    FN(Store)(self, data, mask, i);
  }
}

static BROTLI_INLINE void FN(StitchToPreviousBlock)(HashToBinaryTree* self,
    size_t num_bytes, size_t position, const uint8_t* ringbuffer,
    size_t ringbuffer_mask) {
  if (num_bytes >= FN(HashTypeLength)() - 1 &&
      position >= MAX_TREE_COMP_LENGTH) {
    /* Store the last `MAX_TREE_COMP_LENGTH - 1` positions in the hasher.
       These could not be calculated before, since they require knowledge
       of both the previous and the current block. */
    const size_t i_start = position - MAX_TREE_COMP_LENGTH + 1;
    const size_t i_end = BROTLI_MIN(size_t, position, i_start + num_bytes);
    size_t i;
    for (i = i_start; i < i_end; ++i) {
      /* Maximum distance is window size - 16, see section 9.1. of the spec.
         Furthermore, we have to make sure that we don't look further back
         from the start of the next block than the window size, otherwise we
         could access already overwritten areas of the ringbuffer. */
      const size_t max_backward =
          self->window_mask_ - BROTLI_MAX(size_t, 15, position - i);
      /* We know that i + MAX_TREE_COMP_LENGTH <= position + num_bytes, i.e.
         the end of the current block, and that we have at least
         MAX_TREE_COMP_LENGTH tail in the ringbuffer. */
      FN(StoreAndFindMatches)(self, ringbuffer, i, ringbuffer_mask,
          MAX_TREE_COMP_LENGTH, max_backward, NULL, NULL);
    }
  }
}

#undef BUCKET_SIZE
#undef BUCKET_BITS
#undef HASHER

/* For BUCKET_SWEEP == 1, enabling the dictionary lookup makes compression
   a little faster (0.5% - 1%) and it compresses 0.15% better on small text
   and html inputs. */

#define HASHER() H2
#define BUCKET_BITS 16
#define BUCKET_SWEEP 1
#define USE_DICTIONARY 1
#include "./hash_longest_match_quickly_inc.h"  /* NOLINT(build/include) */
#undef BUCKET_SWEEP
#undef USE_DICTIONARY
#undef HASHER

#define HASHER() H3
#define BUCKET_SWEEP 2
#define USE_DICTIONARY 0
#include "./hash_longest_match_quickly_inc.h"  /* NOLINT(build/include) */
#undef USE_DICTIONARY
#undef BUCKET_SWEEP
#undef BUCKET_BITS
#undef HASHER

#define HASHER() H4
#define BUCKET_BITS 17
#define BUCKET_SWEEP 4
#define USE_DICTIONARY 1
#include "./hash_longest_match_quickly_inc.h"  /* NOLINT(build/include) */
#undef USE_DICTIONARY
#undef BUCKET_SWEEP
#undef BUCKET_BITS
#undef HASHER

#define HASHER() H5
#define BUCKET_BITS 14
#define BLOCK_BITS 4
#define NUM_LAST_DISTANCES_TO_CHECK 4
#include "./hash_longest_match_inc.h"  /* NOLINT(build/include) */
#undef BLOCK_BITS
#undef HASHER

#define HASHER() H6
#define BLOCK_BITS 5
#include "./hash_longest_match_inc.h"  /* NOLINT(build/include) */
#undef NUM_LAST_DISTANCES_TO_CHECK
#undef BLOCK_BITS
#undef BUCKET_BITS
#undef HASHER

#define HASHER() H7
#define BUCKET_BITS 15
#define BLOCK_BITS 6
#define NUM_LAST_DISTANCES_TO_CHECK 10
#include "./hash_longest_match_inc.h"  /* NOLINT(build/include) */
#undef BLOCK_BITS
#undef HASHER

#define HASHER() H8
#define BLOCK_BITS 7
#include "./hash_longest_match_inc.h"  /* NOLINT(build/include) */
#undef NUM_LAST_DISTANCES_TO_CHECK
#undef BLOCK_BITS
#undef HASHER

#define HASHER() H9
#define BLOCK_BITS 8
#define NUM_LAST_DISTANCES_TO_CHECK 16
#include "./hash_longest_match_inc.h"  /* NOLINT(build/include) */
#undef NUM_LAST_DISTANCES_TO_CHECK
#undef BLOCK_BITS
#undef BUCKET_BITS
#undef HASHER

#undef FN
#undef CAT
#undef EXPAND_CAT
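
/* Summary of the hasher family stamped out above (values taken from the
   #defines): H2 (BUCKET_BITS=16, BUCKET_SWEEP=1, dictionary on),
   H3 (16, sweep 2, dictionary off) and H4 (17, sweep 4, dictionary on) use
   the "longest match quickly" template; H5 (BUCKET_BITS=14, BLOCK_BITS=4)
   and H6 (14, 5) check the last 4 distances, H7 (15, 6) and H8 (15, 7)
   check 10, and H9 (15, 8) checks 16, all via the "longest match" template.
   H10 above is the binary-tree hasher. */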

typedef struct Hashers {
  H2* hash_h2;
  H3* hash_h3;
  H4* hash_h4;
  H5* hash_h5;
  H6* hash_h6;
  H7* hash_h7;
  H8* hash_h8;
  H9* hash_h9;
  H10* hash_h10;
} Hashers;

static BROTLI_INLINE void InitHashers(Hashers* self) {
  self->hash_h2 = 0;
  self->hash_h3 = 0;
  self->hash_h4 = 0;
  self->hash_h5 = 0;
  self->hash_h6 = 0;
  self->hash_h7 = 0;
  self->hash_h8 = 0;
  self->hash_h9 = 0;
  self->hash_h10 = 0;
}

static BROTLI_INLINE void DestroyHashers(MemoryManager* m, Hashers* self) {
  BROTLI_FREE(m, self->hash_h2);
  BROTLI_FREE(m, self->hash_h3);
  BROTLI_FREE(m, self->hash_h4);
  BROTLI_FREE(m, self->hash_h5);
  BROTLI_FREE(m, self->hash_h6);
  BROTLI_FREE(m, self->hash_h7);
  BROTLI_FREE(m, self->hash_h8);
  BROTLI_FREE(m, self->hash_h9);
  if (self->hash_h10) CleanupH10(m, self->hash_h10);
  BROTLI_FREE(m, self->hash_h10);
}

static BROTLI_INLINE void HashersSetup(
    MemoryManager* m, Hashers* self, int type) {
  switch (type) {
    case 2:
      self->hash_h2 = BROTLI_ALLOC(m, H2, 1);
      if (BROTLI_IS_OOM(m)) return;
      ResetH2(self->hash_h2);
      break;
    case 3:
      self->hash_h3 = BROTLI_ALLOC(m, H3, 1);
      if (BROTLI_IS_OOM(m)) return;
      ResetH3(self->hash_h3);
      break;
    case 4:
      self->hash_h4 = BROTLI_ALLOC(m, H4, 1);
      if (BROTLI_IS_OOM(m)) return;
      ResetH4(self->hash_h4);
      break;
    case 5:
      self->hash_h5 = BROTLI_ALLOC(m, H5, 1);
      if (BROTLI_IS_OOM(m)) return;
      ResetH5(self->hash_h5);
      break;
    case 6:
      self->hash_h6 = BROTLI_ALLOC(m, H6, 1);
      if (BROTLI_IS_OOM(m)) return;
      ResetH6(self->hash_h6);
      break;
    case 7:
      self->hash_h7 = BROTLI_ALLOC(m, H7, 1);
      if (BROTLI_IS_OOM(m)) return;
      ResetH7(self->hash_h7);
      break;
    case 8:
      self->hash_h8 = BROTLI_ALLOC(m, H8, 1);
      if (BROTLI_IS_OOM(m)) return;
      ResetH8(self->hash_h8);
      break;
    case 9:
      self->hash_h9 = BROTLI_ALLOC(m, H9, 1);
      if (BROTLI_IS_OOM(m)) return;
      ResetH9(self->hash_h9);
      break;
    case 10:
      self->hash_h10 = BROTLI_ALLOC(m, H10, 1);
      if (BROTLI_IS_OOM(m)) return;
      InitializeH10(self->hash_h10);
      break;
    default: break;
  }
}
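
/* Usage sketch (hypothetical caller; error handling elided): only the hasher
   selected by `type` is allocated, the rest stay NULL.

     Hashers hashers;
     InitHashers(&hashers);
     HashersSetup(m, &hashers, 5);   (* allocates and resets hash_h5 *)
     if (BROTLI_IS_OOM(m)) abort();
     ...
     DestroyHashers(m, &hashers);    (* frees whichever hashers were set up *)
*/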

#define _TEMPLATE(Hasher) \
static BROTLI_INLINE void WarmupHash ## Hasher(MemoryManager* m, \
    const int lgwin, const size_t size, const uint8_t* dict, Hasher* hasher) { \
  size_t overlap = (StoreLookahead ## Hasher()) - 1; \
  size_t i; \
  Init ## Hasher(m, hasher, dict, lgwin, 0, size, 0); \
  if (BROTLI_IS_OOM(m)) return; \
  for (i = 0; i + overlap < size; i++) { \
    Store ## Hasher(hasher, dict, ~(size_t)0, i); \
  } \
}
_TEMPLATE(H2) _TEMPLATE(H3) _TEMPLATE(H4) _TEMPLATE(H5) _TEMPLATE(H6)
_TEMPLATE(H7) _TEMPLATE(H8) _TEMPLATE(H9) _TEMPLATE(H10)
#undef _TEMPLATE
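
/* Expansion example: _TEMPLATE(H5) generates WarmupHashH5, which calls InitH5
   once and then StoreH5 for every position where a full StoreLookaheadH5()
   window of dictionary bytes is available. The mask ~(size_t)0 makes the
   ring-buffer wrap a no-op, since the dictionary is a plain linear buffer. */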

/* Custom LZ77 window. */
static BROTLI_INLINE void HashersPrependCustomDictionary(
    MemoryManager* m, Hashers* self, int type, int lgwin, const size_t size,
    const uint8_t* dict) {
  switch (type) {
    case 2: WarmupHashH2(m, lgwin, size, dict, self->hash_h2); break;
    case 3: WarmupHashH3(m, lgwin, size, dict, self->hash_h3); break;
    case 4: WarmupHashH4(m, lgwin, size, dict, self->hash_h4); break;
    case 5: WarmupHashH5(m, lgwin, size, dict, self->hash_h5); break;
    case 6: WarmupHashH6(m, lgwin, size, dict, self->hash_h6); break;
    case 7: WarmupHashH7(m, lgwin, size, dict, self->hash_h7); break;
    case 8: WarmupHashH8(m, lgwin, size, dict, self->hash_h8); break;
    case 9: WarmupHashH9(m, lgwin, size, dict, self->hash_h9); break;
    case 10: WarmupHashH10(m, lgwin, size, dict, self->hash_h10); break;
    default: break;
  }
  if (BROTLI_IS_OOM(m)) return;
}
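
/* Usage sketch (hypothetical; assumes HashersSetup(m, self, type) already
   ran): feeding a shared dictionary before the real input lets early
   backward references point into it, e.g.
     HashersPrependCustomDictionary(m, &hashers, 5, 16, dict_size, dict);
*/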

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif

#endif  /* BROTLI_ENC_HASH_H_ */