/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* *****************************************************************************
 * Constructs a dictionary using a heuristic based on the following paper:
 *
 * Liao, Petri, Moffat, Wirth
 * Effective Construction of Relative Lempel-Ziv Dictionaries
 * Published in WWW 2016.
 *
 * Adapted from code originally written by @ot (Giuseppe Ottaviano).
 ******************************************************************************/

/*-*************************************
* Dependencies
***************************************/
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memset */
#include <time.h>   /* clock */

#ifndef ZDICT_STATIC_LINKING_ONLY
# define ZDICT_STATIC_LINKING_ONLY
#endif

#include "../common/mem.h" /* read */
#include "../common/pool.h"
#include "../common/threading.h"
#include "../common/zstd_internal.h" /* includes zstd.h */
#include "../zdict.h"
#include "cover.h"

/*-*************************************
* Constants
***************************************/
/**
 * 32-bit indexes are used to reference samples, so limit the total samples
 * size to 4 GB on 64-bit builds.
 * For 32-bit builds we choose 1 GB.
 * Most 32-bit platforms have 2 GB of user-mode addressable space, and we
 * allocate a large contiguous buffer, so 1 GB is already a high limit.
 */
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define COVER_DEFAULT_SPLITPOINT 1.0

/*-*************************************
* Console display
***************************************/
#ifndef LOCALDISPLAYLEVEL
static int g_displayLevel = 0;
#endif
#undef DISPLAY
#define DISPLAY(...)                    \
  {                                     \
    fprintf(stderr, __VA_ARGS__);       \
    fflush(stderr);                     \
  }
#undef LOCALDISPLAYLEVEL
#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \
  if (displayLevel >= l) {                      \
    DISPLAY(__VA_ARGS__);                       \
  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
#undef DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)

#ifndef LOCALDISPLAYUPDATE
static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;
#endif
#undef LOCALDISPLAYUPDATE
#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                     \
  if (displayLevel >= l) {                                           \
    if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \
      g_time = clock();                                              \
      DISPLAY(__VA_ARGS__);                                          \
    }                                                                \
  }
#undef DISPLAYUPDATE
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)

/*-*************************************
* Hash table
***************************************
* A small specialized hash map for storing activeDmers.
* The map does not resize, so if it becomes full it will loop forever.
* Thus, the map must be large enough to store every value.
* The map implements linear probing and keeps its load less than 0.5.
*/

#define MAP_EMPTY_VALUE ((U32)-1)
typedef struct COVER_map_pair_t_s {
  U32 key;
  U32 value;
} COVER_map_pair_t;

typedef struct COVER_map_s {
  COVER_map_pair_t *data;
  U32 sizeLog;
  U32 size;
  U32 sizeMask;
} COVER_map_t;

/**
 * Clear the map.
 */
static void COVER_map_clear(COVER_map_t *map) {
  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
}

/**
 * Initializes a map of the given size.
 * Returns 1 on success and 0 on failure.
 * The map must be destroyed with COVER_map_destroy().
 * The map is only guaranteed to be large enough to hold size elements.
 */
static int COVER_map_init(COVER_map_t *map, U32 size) {
  map->sizeLog = ZSTD_highbit32(size) + 2;
  map->size = (U32)1 << map->sizeLog;
  map->sizeMask = map->size - 1;
  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
  if (!map->data) {
    map->sizeLog = 0;
    map->size = 0;
    return 0;
  }
  COVER_map_clear(map);
  return 1;
}
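
/* Note: sizeLog = ZSTD_highbit32(size) + 2 yields a capacity of
 * 4 * 2^floor(log2(size)), which is strictly greater than 2 * size, so the
 * load factor stays below 0.5 even when all `size` elements are inserted,
 * as the header above requires.
 */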

/**
 * Internal hash function
 */
static const U32 COVER_prime4bytes = 2654435761U;
static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
  return (key * COVER_prime4bytes) >> (32 - map->sizeLog);
}

/**
 * Helper function that returns the index that a key should be placed into.
 */
static U32 COVER_map_index(COVER_map_t *map, U32 key) {
  const U32 hash = COVER_map_hash(map, key);
  U32 i;
  for (i = hash;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *pos = &map->data[i];
    if (pos->value == MAP_EMPTY_VALUE) {
      return i;
    }
    if (pos->key == key) {
      return i;
    }
  }
}

/**
 * Returns the pointer to the value for key.
 * If key is not in the map, it is inserted and the value is set to 0.
 * The map must not be full.
 */
static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
  if (pos->value == MAP_EMPTY_VALUE) {
    pos->key = key;
    pos->value = 0;
  }
  return &pos->value;
}

/**
 * Deletes key from the map if present.
 */
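/* Removal uses backward-shift deletion for linear probing: once the key's
 * slot is freed, later entries in the probe chain are shifted back (whenever
 * their probe distance allows it) so that lookups never stop early at a
 * false empty slot.
 */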
static void COVER_map_remove(COVER_map_t *map, U32 key) {
  U32 i = COVER_map_index(map, key);
  COVER_map_pair_t *del = &map->data[i];
  U32 shift = 1;
  if (del->value == MAP_EMPTY_VALUE) {
    return;
  }
  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *const pos = &map->data[i];
    /* If the position is empty we are done */
    if (pos->value == MAP_EMPTY_VALUE) {
      del->value = MAP_EMPTY_VALUE;
      return;
    }
    /* If pos can be moved to del do so */
    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
      del->key = pos->key;
      del->value = pos->value;
      del = pos;
      shift = 1;
    } else {
      ++shift;
    }
  }
}

/**
 * Destroys a map that is inited with COVER_map_init().
 */
static void COVER_map_destroy(COVER_map_t *map) {
  if (map->data) {
    free(map->data);
  }
  map->data = NULL;
  map->size = 0;
}

/*-*************************************
* Context
***************************************/

typedef struct {
  const BYTE *samples;
  size_t *offsets;
  const size_t *samplesSizes;
  size_t nbSamples;
  size_t nbTrainSamples;
  size_t nbTestSamples;
  U32 *suffix;
  size_t suffixSize;
  U32 *freqs;
  U32 *dmerAt;
  unsigned d;
} COVER_ctx_t;

/* We need a global context for qsort... */
static COVER_ctx_t *g_coverCtx = NULL;

/*-*************************************
* Helper functions
***************************************/

/**
 * Returns the sum of the sample sizes.
 */
size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
  size_t sum = 0;
  unsigned i;
  for (i = 0; i < nbSamples; ++i) {
    sum += samplesSizes[i];
  }
  return sum;
}

/**
 * Returns -1 if the dmer at lp is less than the dmer at rp.
 * Returns 0 if the dmers at lp and rp are equal.
 * Returns 1 if the dmer at lp is greater than the dmer at rp.
 */
static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U32 const lhs = *(U32 const *)lp;
  U32 const rhs = *(U32 const *)rp;
  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
}

/**
 * Faster version for d <= 8.
 */
static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
  if (lhs < rhs) {
    return -1;
  }
  return (lhs > rhs);
}
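
/* Note: the masked little-endian read orders dmers by their integer value,
 * which differs from memcmp()'s lexicographic order. That is fine here: the
 * suffix sort is only used to group equal dmers together, so any consistent
 * total order with the same equality relation works.
 */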

/**
 * Same as COVER_cmp() except ties are broken by pointer value
 * NOTE: g_coverCtx must be set to call this function. A global is required because
 * qsort doesn't take an opaque pointer.
 */
static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) {
  int result = COVER_cmp(g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}

/**
 * Faster version for d <= 8.
 */
static int WIN_CDECL COVER_strict_cmp8(const void *lp, const void *rp) {
  int result = COVER_cmp8(g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}

/**
 * Returns the first pointer in [first, last) whose element does not compare
 * less than value. If no such element exists it returns last.
 */
static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
                                       size_t value) {
  size_t count = last - first;
  while (count != 0) {
    size_t step = count / 2;
    const size_t *ptr = first;
    ptr += step;
    if (*ptr < value) {
      first = ++ptr;
      count -= step + 1;
    } else {
      count = step;
    }
  }
  return first;
}

/**
 * Generic groupBy function.
 * Groups an array sorted by cmp into groups with equivalent values.
 * Calls grp for each group.
 */
static void
COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
              int (*cmp)(COVER_ctx_t *, const void *, const void *),
              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
  const BYTE *ptr = (const BYTE *)data;
  size_t num = 0;
  while (num < count) {
    const BYTE *grpEnd = ptr + size;
    ++num;
    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
      grpEnd += size;
      ++num;
    }
    grp(ctx, ptr, grpEnd);
    ptr = grpEnd;
  }
}

/*-*************************************
* Cover functions
***************************************/

/**
 * Called on each group of positions with the same dmer.
 * Counts the frequency of each dmer and saves it in the suffix array.
 * Fills `ctx->dmerAt`.
 */
static void COVER_group(COVER_ctx_t *ctx, const void *group,
                        const void *groupEnd) {
  /* The group consists of all the positions with the same first d bytes. */
  const U32 *grpPtr = (const U32 *)group;
  const U32 *grpEnd = (const U32 *)groupEnd;
  /* The dmerId is how we will reference this dmer.
   * This allows us to map the whole dmer space to a much smaller space, the
   * size of the suffix array.
   */
  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
  /* Count the number of samples this dmer shows up in */
  U32 freq = 0;
  /* Details */
  const size_t *curOffsetPtr = ctx->offsets;
  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
   * different sample than the last.
   */
  size_t curSampleEnd = ctx->offsets[0];
  for (; grpPtr != grpEnd; ++grpPtr) {
    /* Save the dmerId for this position so we can get back to it. */
    ctx->dmerAt[*grpPtr] = dmerId;
    /* Dictionaries only help for the first reference to the dmer.
     * After that zstd can reference the match from the previous reference.
     * So only count each dmer once for each sample it is in.
     */
    if (*grpPtr < curSampleEnd) {
      continue;
    }
    freq += 1;
    /* Binary search to find the end of the sample *grpPtr is in.
     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
     * search because the loop is over.
     */
    if (grpPtr + 1 != grpEnd) {
      const size_t *sampleEndPtr =
          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
      curSampleEnd = *sampleEndPtr;
      curOffsetPtr = sampleEndPtr + 1;
    }
  }
  /* At this point we are never going to look at this segment of the suffix
   * array again. We take advantage of this fact to save memory.
   * We store the frequency of the dmer in the first position of the group,
   * which is dmerId.
   */
  ctx->suffix[dmerId] = freq;
}

/**
 * Selects the best segment in an epoch.
 * Segments of size k are scored according to the following function:
 *
 * Let F(d) be the frequency of dmer d.
 * Let S_i be the dmer at position i of segment S which has length k.
 *
 * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer d is in the dictionary we set F(d) = 0.
 */
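/* Example (illustrative numbers): with k = 16 and d = 8 a segment spans
 * k - d + 1 = 9 dmer positions. Per the loop below, each *distinct* dmer in
 * the window contributes its frequency once, so repeats inside the same
 * segment do not inflate the score.
 */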
static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                           COVER_map_t *activeDmers, U32 begin,
                                           U32 end,
                                           ZDICT_cover_params_t parameters) {
  /* Constants */
  const U32 k = parameters.k;
  const U32 d = parameters.d;
  const U32 dmersInK = k - d + 1;
  /* Try each segment (activeSegment) and save the best (bestSegment) */
  COVER_segment_t bestSegment = {0, 0, 0};
  COVER_segment_t activeSegment;
  /* Reset the activeDmers in the segment */
  COVER_map_clear(activeDmers);
  /* The activeSegment starts at the beginning of the epoch. */
  activeSegment.begin = begin;
  activeSegment.end = begin;
  activeSegment.score = 0;
  /* Slide the activeSegment through the whole epoch.
   * Save the best segment in bestSegment.
   */
  while (activeSegment.end < end) {
    /* The dmerId for the dmer at the next position */
    U32 newDmer = ctx->dmerAt[activeSegment.end];
    /* The entry in activeDmers for this dmerId */
    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
    /* If the dmer isn't already present in the segment add its score. */
    if (*newDmerOcc == 0) {
      /* The paper suggests using the L-0.5 norm, but experiments show that it
       * doesn't help.
       */
      activeSegment.score += freqs[newDmer];
    }
    /* Add the dmer to the segment */
    activeSegment.end += 1;
    *newDmerOcc += 1;
    /* If the window is now too large, drop the first position */
    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
      U32 delDmer = ctx->dmerAt[activeSegment.begin];
      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
      activeSegment.begin += 1;
      *delDmerOcc -= 1;
      /* If this is the last occurrence of the dmer, subtract its score */
      if (*delDmerOcc == 0) {
        COVER_map_remove(activeDmers, delDmer);
        activeSegment.score -= freqs[delDmer];
      }
    }
    /* If this segment is the best so far save it */
    if (activeSegment.score > bestSegment.score) {
      bestSegment = activeSegment;
    }
  }
  {
    /* Trim off the zero frequency head and tail from the segment. */
    U32 newBegin = bestSegment.end;
    U32 newEnd = bestSegment.begin;
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      U32 freq = freqs[ctx->dmerAt[pos]];
      if (freq != 0) {
        newBegin = MIN(newBegin, pos);
        newEnd = pos + 1;
      }
    }
    bestSegment.begin = newBegin;
    bestSegment.end = newEnd;
  }
  {
    /* Zero out the frequency of each dmer covered by the chosen segment. */
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      freqs[ctx->dmerAt[pos]] = 0;
    }
  }
  return bestSegment;
}

/**
 * Check the validity of the parameters.
 * Returns non-zero if the parameters are valid and 0 otherwise.
 */
static int COVER_checkParameters(ZDICT_cover_params_t parameters,
                                 size_t maxDictSize) {
  /* k and d are required parameters */
  if (parameters.d == 0 || parameters.k == 0) {
    return 0;
  }
  /* k <= maxDictSize */
  if (parameters.k > maxDictSize) {
    return 0;
  }
  /* d <= k */
  if (parameters.d > parameters.k) {
    return 0;
  }
  /* 0 < splitPoint <= 1 */
  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {
    return 0;
  }
  return 1;
}

/**
 * Clean up a context initialized with `COVER_ctx_init()`.
 */
static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
  if (!ctx) {
    return;
  }
  if (ctx->suffix) {
    free(ctx->suffix);
    ctx->suffix = NULL;
  }
  if (ctx->freqs) {
    free(ctx->freqs);
    ctx->freqs = NULL;
  }
  if (ctx->dmerAt) {
    free(ctx->dmerAt);
    ctx->dmerAt = NULL;
  }
  if (ctx->offsets) {
    free(ctx->offsets);
    ctx->offsets = NULL;
  }
}

/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used
 * multiple times.
 * Returns 0 on success or an error code on failure.
 * The context must be destroyed with `COVER_ctx_destroy()`.
 */
static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                             const size_t *samplesSizes, unsigned nbSamples,
                             unsigned d, double splitPoint) {
  const BYTE *const samples = (const BYTE *)samplesBuffer;
  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
  /* Split samples into testing and training sets */
  const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
  const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
  const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
  /* Checks */
  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
    DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                 (unsigned)(totalSamplesSize >> 20), (COVER_MAX_SAMPLES_SIZE >> 20));
    return ERROR(srcSize_wrong);
  }
  /* Check if there are at least 5 training samples */
  if (nbTrainSamples < 5) {
    DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
    return ERROR(srcSize_wrong);
  }
  /* Check if there's at least one testing sample */
  if (nbTestSamples < 1) {
    DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
    return ERROR(srcSize_wrong);
  }
  /* Zero the context */
  memset(ctx, 0, sizeof(*ctx));
  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
               (unsigned)trainingSamplesSize);
  DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
               (unsigned)testSamplesSize);
  ctx->samples = samples;
  ctx->samplesSizes = samplesSizes;
  ctx->nbSamples = nbSamples;
  ctx->nbTrainSamples = nbTrainSamples;
  ctx->nbTestSamples = nbTestSamples;
  /* Partial suffix array */
  ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* Maps index to the dmerID */
  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* The offsets of each file */
  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
    COVER_ctx_destroy(ctx);
    return ERROR(memory_allocation);
  }
  ctx->freqs = NULL;
  ctx->d = d;
  /* Fill offsets from the samplesSizes */
  {
    U32 i;
    ctx->offsets[0] = 0;
    for (i = 1; i <= nbSamples; ++i) {
      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
    }
  }
  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
  {
    /* suffix is a partial suffix array.
     * It only sorts suffixes by their first parameters.d bytes.
     * The sort is stable, so each dmer group is sorted by position in input.
     */
    U32 i;
    for (i = 0; i < ctx->suffixSize; ++i) {
      ctx->suffix[i] = i;
    }
    /* qsort doesn't take an opaque pointer, so pass as a global.
     * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
     */
    g_coverCtx = ctx;
#if defined(__OpenBSD__)
    mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
              (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#else
    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#endif
  }
  DISPLAYLEVEL(2, "Computing frequencies\n");
  /* For each dmer group (group of positions with the same first d bytes):
   * 1. For each position we set dmerAt[position] = dmerID. The dmerID is
   *    (groupBeginPtr - suffix). This allows us to go from position to
   *    dmerID so we can look up values in freq.
   * 2. We calculate how many samples the dmer occurs in and save it in
   *    freqs[dmerId].
   */
  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
  ctx->freqs = ctx->suffix;
  ctx->suffix = NULL;
  return 0;
}

void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
{
  const double ratio = (double)nbDmers / maxDictSize;
  if (ratio >= 10) {
    return;
  }
  LOCALDISPLAYLEVEL(displayLevel, 1,
                    "WARNING: The maximum dictionary size %u is too large "
                    "compared to the source size %u! "
                    "size(source)/size(dictionary) = %f, but it should be >= "
                    "10! This may lead to a subpar dictionary! We recommend "
                    "training on sources at least 10x, and preferably 100x "
                    "the size of the dictionary! \n", (U32)maxDictSize,
                    (U32)nbDmers, ratio);
}

COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
                                       U32 nbDmers, U32 k, U32 passes)
{
  const U32 minEpochSize = k * 10;
  COVER_epoch_info_t epochs;
  epochs.num = MAX(1, maxDictSize / k / passes);
  epochs.size = nbDmers / epochs.num;
  if (epochs.size >= minEpochSize) {
    assert(epochs.size * epochs.num <= nbDmers);
    return epochs;
  }
  epochs.size = MIN(minEpochSize, nbDmers);
  epochs.num = nbDmers / epochs.size;
  assert(epochs.size * epochs.num <= nbDmers);
  return epochs;
}
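
/* Worked example (illustrative numbers): maxDictSize = 112640 (110 KB),
 * k = 1024 and passes = 4 give epochs.num = MAX(1, 112640/1024/4) = 27.
 * With nbDmers = 1000000, epochs.size = 1000000/27 = 37037, which clears
 * minEpochSize = 10240, so each of the 27 epochs contributes segments of
 * up to k bytes until the dictionary fills.
 */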

/**
 * Given the prepared context build the dictionary.
 */
static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
                                    COVER_map_t *activeDmers, void *dictBuffer,
                                    size_t dictBufferCapacity,
                                    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data into epochs. We will select one segment from each epoch. */
  const COVER_epoch_info_t epochs = COVER_computeEpochs(
      (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
  const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
  size_t zeroScoreRun = 0;
  size_t epoch;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
               (U32)epochs.num, (U32)epochs.size);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
    const U32 epochBegin = (U32)(epoch * epochs.size);
    const U32 epochEnd = epochBegin + epochs.size;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = COVER_selectSegment(
        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
    /* If the segment covers no dmers, then we are out of content.
     * There may be new content in other epochs, so continue for some time.
     */
    if (segment.score == 0) {
      if (++zeroScoreRun >= maxZeroScoreRun) {
        break;
      }
      continue;
    }
    zeroScoreRun = 0;
    /* Trim the segment if necessary and if it is too small then we are done */
    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
    if (segmentSize < parameters.d) {
      break;
    }
    /* We fill the dictionary from the back to allow the best segments to be
     * referenced with the smallest offsets.
     */
    tail -= segmentSize;
    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
    DISPLAYUPDATE(
        2, "\r%u%% ",
        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  return tail;
}

ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity,
    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters)
{
  BYTE* const dict = (BYTE*)dictBuffer;
  COVER_ctx_t ctx;
  COVER_map_t activeDmers;
  parameters.splitPoint = 1.0;
  /* Initialize global data */
  g_displayLevel = (int)parameters.zParams.notificationLevel;
  /* Checks */
  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
    return ERROR(parameter_outOfBound);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(srcSize_wrong);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  /* Initialize context and activeDmers */
  {
    size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                                          parameters.d, parameters.splitPoint);
    if (ZSTD_isError(initVal)) {
      return initVal;
    }
  }
  COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    COVER_ctx_destroy(&ctx);
    return ERROR(memory_allocation);
  }
  DISPLAYLEVEL(2, "Building dictionary\n");
  {
    const size_t tail =
        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
                              dictBufferCapacity, parameters);
    const size_t dictionarySize = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
    if (!ZSTD_isError(dictionarySize)) {
      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                   (unsigned)dictionarySize);
    }
    COVER_ctx_destroy(&ctx);
    COVER_map_destroy(&activeDmers);
    return dictionarySize;
  }
}
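
/* Usage sketch (illustrative, not part of the library): train a dictionary
 * from a concatenated sample buffer with fixed k and d. The buffers and
 * error handling are elided; only the API above is assumed.
 *
 *   ZDICT_cover_params_t params;
 *   memset(&params, 0, sizeof(params));
 *   params.k = 1024;   // segment size
 *   params.d = 8;      // dmer size, d <= k
 *   {
 *     size_t const dictSize = ZDICT_trainFromBuffer_cover(
 *         dictBuffer, dictCapacity, samplesBuffer, samplesSizes,
 *         nbSamples, params);
 *     if (ZDICT_isError(dictSize)) { ... }
 *   }
 */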

size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
                                      const size_t *samplesSizes, const BYTE *samples,
                                      size_t *offsets,
                                      size_t nbTrainSamples, size_t nbSamples,
                                      BYTE *const dict, size_t dictBufferCapacity) {
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Pointers */
  ZSTD_CCtx *cctx;
  ZSTD_CDict *cdict;
  void *dst;
  /* Local variables */
  size_t dstCapacity;
  size_t i;
  /* Allocate dst with enough space to compress the maximum sized sample */
  {
    size_t maxSampleSize = 0;
    i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
    for (; i < nbSamples; ++i) {
      maxSampleSize = MAX(samplesSizes[i], maxSampleSize);
    }
    dstCapacity = ZSTD_compressBound(maxSampleSize);
    dst = malloc(dstCapacity);
  }
  /* Create the cctx and cdict */
  cctx = ZSTD_createCCtx();
  cdict = ZSTD_createCDict(dict, dictBufferCapacity,
                           parameters.zParams.compressionLevel);
  if (!dst || !cctx || !cdict) {
    goto _compressCleanup;
  }
  /* Compress each sample and sum their sizes (or error) */
  totalCompressedSize = dictBufferCapacity;
  i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
  for (; i < nbSamples; ++i) {
    const size_t size = ZSTD_compress_usingCDict(
        cctx, dst, dstCapacity, samples + offsets[i],
        samplesSizes[i], cdict);
    if (ZSTD_isError(size)) {
      totalCompressedSize = size;
      goto _compressCleanup;
    }
    totalCompressedSize += size;
  }
_compressCleanup:
  ZSTD_freeCCtx(cctx);
  ZSTD_freeCDict(cdict);
  if (dst) {
    free(dst);
  }
  return totalCompressedSize;
}

/**
 * Initialize the `COVER_best_t`.
 */
void COVER_best_init(COVER_best_t *best) {
  if (best == NULL) return; /* compatible with init on NULL */
  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
  best->liveJobs = 0;
  best->dict = NULL;
  best->dictSize = 0;
  best->compressedSize = (size_t)-1;
  memset(&best->parameters, 0, sizeof(best->parameters));
}

/**
 * Wait until liveJobs == 0.
 */
void COVER_best_wait(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  while (best->liveJobs != 0) {
    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
  }
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Call COVER_best_wait() and then destroy the COVER_best_t.
 */
void COVER_best_destroy(COVER_best_t *best) {
  if (!best) {
    return;
  }
  COVER_best_wait(best);
  if (best->dict) {
    free(best->dict);
  }
  ZSTD_pthread_mutex_destroy(&best->mutex);
  ZSTD_pthread_cond_destroy(&best->cond);
}

/**
 * Called when a thread is about to be launched.
 * Increments liveJobs.
 */
void COVER_best_start(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  ++best->liveJobs;
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Called when a thread finishes executing, whether it succeeded or errored.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far, save it and its parameters.
 */
void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
                       COVER_dictSelection_t selection) {
  void* dict = selection.dictContent;
  size_t compressedSize = selection.totalCompressedSize;
  size_t dictSize = selection.dictSize;
  if (!best) {
    return;
  }
  {
    size_t liveJobs;
    ZSTD_pthread_mutex_lock(&best->mutex);
    --best->liveJobs;
    liveJobs = best->liveJobs;
    /* If the new dictionary is better */
    if (compressedSize < best->compressedSize) {
      /* Allocate space if necessary */
      if (!best->dict || best->dictSize < dictSize) {
        if (best->dict) {
          free(best->dict);
        }
        best->dict = malloc(dictSize);
        if (!best->dict) {
          best->compressedSize = ERROR(GENERIC);
          best->dictSize = 0;
          ZSTD_pthread_cond_signal(&best->cond);
          ZSTD_pthread_mutex_unlock(&best->mutex);
          return;
        }
      }
      /* Save the dictionary, parameters, and size */
      if (dict) {
        memcpy(best->dict, dict, dictSize);
        best->dictSize = dictSize;
        best->parameters = parameters;
        best->compressedSize = compressedSize;
      }
    }
    if (liveJobs == 0) {
      ZSTD_pthread_cond_broadcast(&best->cond);
    }
    ZSTD_pthread_mutex_unlock(&best->mutex);
  }
}

COVER_dictSelection_t COVER_dictSelectionError(size_t error) {
  COVER_dictSelection_t selection = { NULL, 0, error };
  return selection;
}

unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) {
  return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent);
}

void COVER_dictSelectionFree(COVER_dictSelection_t selection) {
  free(selection.dictContent);
}

COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity,
    size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
    size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {

  size_t largestDict = 0;
  size_t largestCompressed = 0;
  BYTE* customDictContentEnd = customDictContent + dictContentSize;

  BYTE* largestDictbuffer = (BYTE*)malloc(dictBufferCapacity);
  BYTE* candidateDictBuffer = (BYTE*)malloc(dictBufferCapacity);
  double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;
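  /* e.g. shrinkDictMaxRegression = 5 gives regressionTolerance = 1.05, so a
   * smaller candidate dictionary is accepted below if its total compressed
   * size is within 5% of the largest dictionary's (illustrative value).
   */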

  if (!largestDictbuffer || !candidateDictBuffer) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(dictContentSize);
  }

  /* Initial dictionary size and compressed size */
  memcpy(largestDictbuffer, customDictContent, dictContentSize);
  dictContentSize = ZDICT_finalizeDictionary(
      largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize,
      samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);

  if (ZDICT_isError(dictContentSize)) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(dictContentSize);
  }

  totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
                                                       samplesBuffer, offsets,
                                                       nbCheckSamples, nbSamples,
                                                       largestDictbuffer, dictContentSize);

  if (ZSTD_isError(totalCompressedSize)) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(totalCompressedSize);
  }

  if (params.shrinkDict == 0) {
    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
    free(candidateDictBuffer);
    return selection;
  }

  largestDict = dictContentSize;
  largestCompressed = totalCompressedSize;
  dictContentSize = ZDICT_DICTSIZE_MIN;

  /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */
  while (dictContentSize < largestDict) {
    memcpy(candidateDictBuffer, largestDictbuffer, largestDict);
    dictContentSize = ZDICT_finalizeDictionary(
        candidateDictBuffer, dictBufferCapacity, customDictContentEnd - dictContentSize, dictContentSize,
        samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);

    if (ZDICT_isError(dictContentSize)) {
      free(largestDictbuffer);
      free(candidateDictBuffer);
      return COVER_dictSelectionError(dictContentSize);
    }

    totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
                                                         samplesBuffer, offsets,
                                                         nbCheckSamples, nbSamples,
                                                         candidateDictBuffer, dictContentSize);

    if (ZSTD_isError(totalCompressedSize)) {
      free(largestDictbuffer);
      free(candidateDictBuffer);
      return COVER_dictSelectionError(totalCompressedSize);
    }

    if (totalCompressedSize <= largestCompressed * regressionTolerance) {
      COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize };
      free(largestDictbuffer);
      return selection;
    }
    dictContentSize *= 2;
  }
  dictContentSize = largestDict;
  totalCompressedSize = largestCompressed;
  {
    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
    free(candidateDictBuffer);
    return selection;
  }
}

/**
 * Parameters for COVER_tryParameters().
 */
typedef struct COVER_tryParameters_data_s {
  const COVER_ctx_t *ctx;
  COVER_best_t *best;
  size_t dictBufferCapacity;
  ZDICT_cover_params_t parameters;
} COVER_tryParameters_data_t;

/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
static void COVER_tryParameters(void *opaque)
{
  /* Save parameters as local variables */
  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t*)opaque;
  const COVER_ctx_t *const ctx = data->ctx;
  const ZDICT_cover_params_t parameters = data->parameters;
  size_t dictBufferCapacity = data->dictBufferCapacity;
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Allocate space for hash table, dict, and freqs */
  COVER_map_t activeDmers;
  BYTE* const dict = (BYTE*)malloc(dictBufferCapacity);
  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
  U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32));
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    goto _cleanup;
  }
  if (!dict || !freqs) {
    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
    goto _cleanup;
  }
  /* Copy the frequencies because we need to modify them */
  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
  /* Build the dictionary */
  {
    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
                                              dictBufferCapacity, parameters);
    selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail,
        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples,
        parameters, ctx->offsets, totalCompressedSize);
    if (COVER_dictSelectionIsError(selection)) {
      DISPLAYLEVEL(1, "Failed to select dictionary\n");
      goto _cleanup;
    }
  }
_cleanup:
  free(dict);
  COVER_best_finish(data->best, parameters, selection);
  free(data);
  COVER_map_destroy(&activeDmers);
  COVER_dictSelectionFree(selection);
  free(freqs);
}

ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
    void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer,
    const size_t* samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t* parameters)
{
  /* constants */
  const unsigned nbThreads = parameters->nbThreads;
  const double splitPoint =
      parameters->splitPoint <= 0.0 ? COVER_DEFAULT_SPLITPOINT : parameters->splitPoint;
  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
  const unsigned kIterations =
      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
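  /* With the defaults above this explores d in {6, 8} and k from 50 to 2000
   * in steps of (2000 - 50) / 40 = 48, i.e. kIterations = 2 * 41 = 82
   * parameter sets (illustrative arithmetic for the default values).
   */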
  const unsigned shrinkDict = 0;
  /* Local variables */
  const int displayLevel = parameters->zParams.notificationLevel;
  unsigned iteration = 1;
  unsigned d;
  unsigned k;
  COVER_best_t best;
  POOL_ctx *pool = NULL;
  int warned = 0;
  /* Checks */
  if (splitPoint <= 0 || splitPoint > 1) {
    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
    return ERROR(parameter_outOfBound);
  }
  if (kMinK < kMaxD || kMaxK < kMinK) {
    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
    return ERROR(parameter_outOfBound);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(srcSize_wrong);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  if (nbThreads > 1) {
    pool = POOL_create(nbThreads, 1);
    if (!pool) {
      return ERROR(memory_allocation);
    }
  }
  /* Initialization */
  COVER_best_init(&best);
  /* Turn down global display level to clean up display at level 2 and below */
  g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
  /* Loop through d first because each new value needs a new context */
  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
                    kIterations);
  for (d = kMinD; d <= kMaxD; d += 2) {
    /* Initialize the context for this value of d */
    COVER_ctx_t ctx;
    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
    {
      const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint);
      if (ZSTD_isError(initVal)) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
        COVER_best_destroy(&best);
        POOL_free(pool);
        return initVal;
      }
    }
    if (!warned) {
      COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
      warned = 1;
    }
    /* Loop through k reusing the same context */
    for (k = kMinK; k <= kMaxK; k += kStepSize) {
      /* Prepare the arguments */
      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
          sizeof(COVER_tryParameters_data_t));
      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
      if (!data) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
        COVER_best_destroy(&best);
        COVER_ctx_destroy(&ctx);
        POOL_free(pool);
        return ERROR(memory_allocation);
      }
      data->ctx = &ctx;
      data->best = &best;
      data->dictBufferCapacity = dictBufferCapacity;
      data->parameters = *parameters;
      data->parameters.k = k;
      data->parameters.d = d;
      data->parameters.splitPoint = splitPoint;
      data->parameters.steps = kSteps;
      data->parameters.shrinkDict = shrinkDict;
      data->parameters.zParams.notificationLevel = g_displayLevel;
      /* Check the parameters */
      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
        free(data);
        continue;
      }
      /* Call the function and pass ownership of data to it */
      COVER_best_start(&best);
      if (pool) {
        POOL_add(pool, &COVER_tryParameters, data);
      } else {
        COVER_tryParameters(data);
      }
      /* Print status */
      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
                         (unsigned)((iteration * 100) / kIterations));
      ++iteration;
    }
    COVER_best_wait(&best);
    COVER_ctx_destroy(&ctx);
  }
  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
  /* Fill the output buffer and parameters with output of the best parameters */
  {
    const size_t dictSize = best.dictSize;
    if (ZSTD_isError(best.compressedSize)) {
      const size_t compressedSize = best.compressedSize;
      COVER_best_destroy(&best);
      POOL_free(pool);
      return compressedSize;
    }
    *parameters = best.parameters;
    memcpy(dictBuffer, best.dict, dictSize);
    COVER_best_destroy(&best);
    POOL_free(pool);
    return dictSize;
  }
}