// lz4.odin — Odin bindings for liblz4 (LZ4 block API and LZ4 HC API).
  1. package vendor_compress_lz4
  2. when ODIN_OS == .Windows {
  3. @(extra_linker_flags="/NODEFAULTLIB:libcmt")
  4. foreign import lib "lib/liblz4_static.lib"
  5. }
  6. import "core:c"
  7. VERSION_MAJOR :: 1 /* for breaking interface changes */
  8. VERSION_MINOR :: 10 /* for new (non-breaking) interface capabilities */
  9. VERSION_RELEASE :: 0 /* for tweaks, bug-fixes, or development */
  10. VERSION_NUMBER :: VERSION_MAJOR *100*100 + VERSION_MINOR *100 + VERSION_RELEASE
  11. MEMORY_USAGE_MIN :: 10
  12. MEMORY_USAGE_DEFAULT :: 14
  13. MEMORY_USAGE_MAX :: 20
  14. MEMORY_USAGE :: MEMORY_USAGE_DEFAULT
  15. MAX_INPUT_SIZE :: 0x7E000000 /* 2_113_929_216 bytes */
  16. COMPRESSBOUND :: #force_inline proc "c" (isize: c.int) -> c.int {
  17. return u32(isize) > MAX_INPUT_SIZE ? 0 : isize + (isize/255) + 16
  18. }
  19. DECODER_RING_BUFFER_SIZE :: #force_inline proc "c" (maxBlockSize: c.int) -> c.int {
  20. return 65536 + 14 + maxBlockSize /* for static allocation; maxBlockSize presumed valid */
  21. }
  22. @(default_calling_convention="c", link_prefix="LZ4_")
  23. foreign lib {
  24. versionNumber :: proc() -> c.int --- /**< library version number; useful to check dll version; requires v1.3.0+ */
  25. versionString :: proc() -> cstring --- /**< library version string; useful to check dll version; requires v1.7.5+ */
  26. /*! LZ4_compress_default() :
  27. * Compresses 'srcSize' bytes from buffer 'src'
  28. * into already allocated 'dst' buffer of size 'dstCapacity'.
  29. * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
  30. * It also runs faster, so it's a recommended setting.
  31. * If the function cannot compress 'src' into a more limited 'dst' budget,
  32. * compression stops *immediately*, and the function result is zero.
  33. * In which case, 'dst' content is undefined (invalid).
  34. * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
  35. * dstCapacity : size of buffer 'dst' (which must be already allocated)
  36. * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
  37. * or 0 if compression fails
  38. * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
  39. */
  40. compress_default :: proc(src, dst: [^]byte, srcSize, dstCapacity: c.int) -> c.int ---
  41. /*! LZ4_decompress_safe() :
  42. * @compressedSize : is the exact complete size of the compressed block.
  43. * @dstCapacity : is the size of destination buffer (which must be already allocated),
  44. * presumed an upper bound of decompressed size.
  45. * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
  46. * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
  47. * If the source stream is detected malformed, the function will stop decoding and return a negative result.
  48. * Note 1 : This function is protected against malicious data packets :
  49. * it will never writes outside 'dst' buffer, nor read outside 'source' buffer,
  50. * even if the compressed block is maliciously modified to order the decoder to do these actions.
  51. * In such case, the decoder stops immediately, and considers the compressed block malformed.
  52. * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them.
  53. * The implementation is free to send / store / derive this information in whichever way is most beneficial.
  54. * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
  55. */
  56. decompress_safe :: proc(src, dst: [^]byte, compressedSize, dstCapacity: c.int) -> c.int ---
  57. /*! LZ4_compressBound() :
  58. Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
  59. This function is primarily useful for memory allocation purposes (destination buffer size).
  60. Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
  61. Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize)
  62. inputSize : max supported value is LZ4_MAX_INPUT_SIZE
  63. return : maximum output size in a "worst case" scenario
  64. or 0, if input size is incorrect (too large or negative)
  65. */
  66. compressBound :: proc(inputSize: c.int) -> c.int ---
  67. /*! LZ4_compress_fast() :
  68. Same as LZ4_compress_default(), but allows selection of "acceleration" factor.
  69. The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
  70. It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
  71. An acceleration value of "1" is the same as regular LZ4_compress_default()
  72. Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
  73. Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
  74. */
  75. compress_fast :: proc(src, dst: [^]byte, srcSize, dstCapacity: c.int, acceleration: c.int) -> c.int ---
  76. /*! LZ4_compress_fast_extState() :
  77. * Same as LZ4_compress_fast(), using an externally allocated memory space for its state.
  78. * Use LZ4_sizeofState() to know how much memory must be allocated,
  79. * and allocate it on 8-bytes boundaries (using `malloc()` typically).
  80. * Then, provide this buffer as `void* state` to compression function.
  81. */
  82. sizeofState :: proc() -> c.int ---
  83. compress_fast_extState :: proc (state: rawptr, src, dst: [^]byte, srcSize, dstCapacity: c.int, acceleration: c.int) -> c.int ---
  84. /*! LZ4_compress_destSize() :
  85. * Reverse the logic : compresses as much data as possible from 'src' buffer
  86. * into already allocated buffer 'dst', of size >= 'dstCapacity'.
  87. * This function either compresses the entire 'src' content into 'dst' if it's large enough,
  88. * or fill 'dst' buffer completely with as much data as possible from 'src'.
  89. * note: acceleration parameter is fixed to "default".
  90. *
  91. * *srcSizePtr : in+out parameter. Initially contains size of input.
  92. * Will be modified to indicate how many bytes where read from 'src' to fill 'dst'.
  93. * New value is necessarily <= input value.
  94. * @return : Nb bytes written into 'dst' (necessarily <= dstCapacity)
  95. * or 0 if compression fails.
  96. *
  97. * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
  98. * the produced compressed content could, in specific circumstances,
  99. * require to be decompressed into a destination buffer larger
  100. * by at least 1 byte than the content to decompress.
  101. * If an application uses `LZ4_compress_destSize()`,
  102. * it's highly recommended to update liblz4 to v1.9.2 or better.
  103. * If this can't be done or ensured,
  104. * the receiving decompression function should provide
  105. * a dstCapacity which is > decompressedSize, by at least 1 byte.
  106. * See https://github.com/lz4/lz4/issues/859 for details
  107. */
  108. compress_destSize :: proc(src, dst: [^]byte, srcSizePtr: ^c.int, targetDstSize: c.int) -> c.int ---
  109. /*! LZ4_decompress_safe_partial() :
  110. * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
  111. * into destination buffer 'dst' of size 'dstCapacity'.
  112. * Up to 'targetOutputSize' bytes will be decoded.
  113. * The function stops decoding on reaching this objective.
  114. * This can be useful to boost performance
  115. * whenever only the beginning of a block is required.
  116. *
  117. * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
  118. * If source stream is detected malformed, function returns a negative result.
  119. *
  120. * Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
  121. *
  122. * Note 2 : targetOutputSize must be <= dstCapacity
  123. *
  124. * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
  125. * so dstCapacity is kind of redundant.
  126. * This is because in older versions of this function,
  127. * decoding operation would still write complete sequences.
  128. * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
  129. * it could write more bytes, though only up to dstCapacity.
  130. * Some "margin" used to be required for this operation to work properly.
  131. * Thankfully, this is no longer necessary.
  132. * The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
  133. *
  134. * Note 4 : If srcSize is the exact size of the block,
  135. * then targetOutputSize can be any value,
  136. * including larger than the block's decompressed size.
  137. * The function will, at most, generate block's decompressed size.
  138. *
  139. * Note 5 : If srcSize is _larger_ than block's compressed size,
  140. * then targetOutputSize **MUST** be <= block's decompressed size.
  141. * Otherwise, *silent corruption will occur*.
  142. */
  143. decompress_safe_partial :: proc (src, dst: [^]byte, srcSize, targetOutputSize, dstCapacity: c.int) -> c.int ---
  144. createStream :: proc() -> ^stream_t ---
  145. freeStream :: proc(streamPtr: ^stream_t) -> c.int ---
  146. /*! LZ4_resetStream_fast() : v1.9.0+
  147. * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
  148. * (e.g., LZ4_compress_fast_continue()).
  149. *
  150. * An LZ4_stream_t must be initialized once before usage.
  151. * This is automatically done when created by LZ4_createStream().
  152. * However, should the LZ4_stream_t be simply declared on stack (for example),
  153. * it's necessary to initialize it first, using LZ4_initStream().
  154. *
  155. * After init, start any new stream with LZ4_resetStream_fast().
  156. * A same LZ4_stream_t can be re-used multiple times consecutively
  157. * and compress multiple streams,
  158. * provided that it starts each new stream with LZ4_resetStream_fast().
  159. *
  160. * LZ4_resetStream_fast() is much faster than LZ4_initStream(),
  161. * but is not compatible with memory regions containing garbage data.
  162. *
  163. * Note: it's only useful to call LZ4_resetStream_fast()
  164. * in the context of streaming compression.
  165. * The *extState* functions perform their own resets.
  166. * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive.
  167. */
  168. resetStream_fast :: proc(streamPtr: ^stream_t) ---
  169. /*! LZ4_loadDict() :
  170. * Use this function to reference a static dictionary into LZ4_stream_t.
  171. * The dictionary must remain available during compression.
  172. * LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
  173. * The same dictionary will have to be loaded on decompression side for successful decoding.
  174. * Dictionary are useful for better compression of small data (KB range).
  175. * While LZ4 itself accepts any input as dictionary, dictionary efficiency is also a topic.
  176. * When in doubt, employ the Zstandard's Dictionary Builder.
  177. * Loading a size of 0 is allowed, and is the same as reset.
  178. * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded)
  179. */
  180. loadDict :: proc(streamPtr: ^stream_t, dictionary: [^]byte, dictSize: c.int) -> c.int ---
  181. /*! LZ4_loadDictSlow() : v1.10.0+
  182. * Same as LZ4_loadDict(),
  183. * but uses a bit more cpu to reference the dictionary content more thoroughly.
  184. * This is expected to slightly improve compression ratio.
  185. * The extra-cpu cost is likely worth it if the dictionary is re-used across multiple sessions.
  186. * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded)
  187. */
  188. loadDictSlow :: proc(streamPtr: ^stream_t, dictionary: [^]byte, dictSize: c.int) -> c.int ---
  189. /*! LZ4_attach_dictionary() : stable since v1.10.0
  190. *
  191. * This allows efficient re-use of a static dictionary multiple times.
  192. *
  193. * Rather than re-loading the dictionary buffer into a working context before
  194. * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
  195. * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
  196. * in which the working stream references @dictionaryStream in-place.
  197. *
  198. * Several assumptions are made about the state of @dictionaryStream.
  199. * Currently, only states which have been prepared by LZ4_loadDict() or
  200. * LZ4_loadDictSlow() should be expected to work.
  201. *
  202. * Alternatively, the provided @dictionaryStream may be NULL,
  203. * in which case any existing dictionary stream is unset.
  204. *
  205. * If a dictionary is provided, it replaces any pre-existing stream history.
  206. * The dictionary contents are the only history that can be referenced and
  207. * logically immediately precede the data compressed in the first subsequent
  208. * compression call.
  209. *
  210. * The dictionary will only remain attached to the working stream through the
  211. * first compression call, at the end of which it is cleared.
  212. * @dictionaryStream stream (and source buffer) must remain in-place / accessible / unchanged
  213. * through the completion of the compression session.
  214. *
  215. * Note: there is no equivalent LZ4_attach_*() method on the decompression side
  216. * because there is no initialization cost, hence no need to share the cost across multiple sessions.
  217. * To decompress LZ4 blocks using dictionary, attached or not,
  218. * just employ the regular LZ4_setStreamDecode() for streaming,
  219. * or the stateless LZ4_decompress_safe_usingDict() for one-shot decompression.
  220. */
  221. attach_dictionary :: proc(workingStream, dictionaryStream: ^stream_t) ---
  222. /*! LZ4_compress_fast_continue() :
  223. * Compress 'src' content using data from previously compressed blocks, for better compression ratio.
  224. * 'dst' buffer must be already allocated.
  225. * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
  226. *
  227. * @return : size of compressed block
  228. * or 0 if there is an error (typically, cannot fit into 'dst').
  229. *
  230. * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block.
  231. * Each block has precise boundaries.
  232. * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata.
  233. * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
  234. *
  235. * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory !
  236. *
  237. * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
  238. * Make sure that buffers are separated, by at least one byte.
  239. * This construction ensures that each block only depends on previous block.
  240. *
  241. * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
  242. *
  243. * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed.
  244. */
  245. compress_fast_continue :: proc(streamPtr: ^stream_t, src, dst: [^]byte, srcSize, dstCapacity: c.int, acceleration: c.int) -> c.int ---
  246. /*! LZ4_saveDict() :
  247. * If last 64KB data cannot be guaranteed to remain available at its current memory location,
  248. * save it into a safer place (char* safeBuffer).
  249. * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
  250. * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
  251. * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
  252. */
  253. saveDict :: proc(streamPtr: ^stream_t, safeBuffer: [^]byte, maxDictSize: c.int) -> c.int ---
  254. createStreamDecode :: proc() -> ^streamDecode_t ---
  255. freeStreamDecode :: proc(LZ4_stream: ^streamDecode_t) -> c.int ---
  256. /*! LZ4_setStreamDecode() :
  257. * An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
  258. * Use this function to start decompression of a new stream of blocks.
  259. * A dictionary can optionally be set. Use NULL or size 0 for a reset order.
  260. * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression.
  261. * @return : 1 if OK, 0 if error
  262. */
  263. setStreamDecode :: proc(LZ4_streamDecode: ^streamDecode_t, dictionary: [^]byte, dictSize: c.int) -> c.int ---
  264. /*! LZ4_decoderRingBufferSize() : v1.8.2+
  265. * Note : in a ring buffer scenario (optional),
  266. * blocks are presumed decompressed next to each other
  267. * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize),
  268. * at which stage it resumes from beginning of ring buffer.
  269. * When setting such a ring buffer for streaming decompression,
  270. * provides the minimum size of this ring buffer
  271. * to be compatible with any source respecting maxBlockSize condition.
  272. * @return : minimum ring buffer size,
  273. * or 0 if there is an error (invalid maxBlockSize).
  274. */
  275. decoderRingBufferSize :: proc(maxBlockSize: c.int) -> c.int ---
  276. /*! LZ4_decompress_safe_continue() :
  277. * This decoding function allows decompression of consecutive blocks in "streaming" mode.
  278. * The difference with the usual independent blocks is that
  279. * new blocks are allowed to find references into former blocks.
  280. * A block is an unsplittable entity, and must be presented entirely to the decompression function.
  281. * LZ4_decompress_safe_continue() only accepts one block at a time.
  282. * It's modeled after `LZ4_decompress_safe()` and behaves similarly.
  283. *
  284. * @LZ4_streamDecode : decompression state, tracking the position in memory of past data
  285. * @compressedSize : exact complete size of one compressed block.
  286. * @dstCapacity : size of destination buffer (which must be already allocated),
  287. * must be an upper bound of decompressed size.
  288. * @return : number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
  289. * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
  290. * If the source stream is detected malformed, the function will stop decoding and return a negative result.
  291. *
  292. * The last 64KB of previously decoded data *must* remain available and unmodified
  293. * at the memory position where they were previously decoded.
  294. * If less than 64KB of data has been decoded, all the data must be present.
  295. *
  296. * Special : if decompression side sets a ring buffer, it must respect one of the following conditions :
  297. * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize).
  298. * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes.
  299. * In which case, encoding and decoding buffers do not need to be synchronized.
  300. * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize.
  301. * - Synchronized mode :
  302. * Decompression buffer size is _exactly_ the same as compression buffer size,
  303. * and follows exactly same update rule (block boundaries at same positions),
  304. * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream),
  305. * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB).
  306. * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes.
  307. * In which case, encoding and decoding buffers do not need to be synchronized,
  308. * and encoding ring buffer can have any size, including small ones ( < 64 KB).
  309. *
  310. * Whenever these conditions are not possible,
  311. * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
  312. * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block.
  313. */
  314. decompress_safe_continue :: proc(LZ4_streamDecode: ^streamDecode_t, src, dst: [^]byte, srcSize, dstCapacity: c.int) -> c.int ---
  315. /*! LZ4_decompress_safe_usingDict() :
  316. * Works the same as
  317. * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_safe_continue()
  318. * However, it's stateless: it doesn't need any LZ4_streamDecode_t state.
  319. * Dictionary is presumed stable : it must remain accessible and unmodified during decompression.
  320. * Performance tip : Decompression speed can be substantially increased
  321. * when dst == dictStart + dictSize.
  322. */
  323. decompress_safe_usingDict :: proc(src, dst: [^]byte, srcSize, dstCapacity: c.int, dictStart: [^]byte, dictSize: c.int) -> c.int ---
  324. /*! LZ4_decompress_safe_partial_usingDict() :
  325. * Behaves the same as LZ4_decompress_safe_partial()
  326. * with the added ability to specify a memory segment for past data.
  327. * Performance tip : Decompression speed can be substantially increased
  328. * when dst == dictStart + dictSize.
  329. */
  330. decompress_safe_partial_usingDict :: proc(src, dst: [^]byte, compressedSize, targetOutputSize, maxOutputSize: c.int, dictStart: [^]byte, dictSize: c.int) -> c.int ---
  331. }
  332. STREAM_MINSIZE :: (1 << MEMORY_USAGE) + 32 /* static size, for inter-version compatibility */
  333. stream_t :: struct #raw_union {
  334. minStateSize: [STREAM_MINSIZE]byte,
  335. internal_donotuse: stream_t_internal,
  336. }
  337. HASHLOG :: MEMORY_USAGE-2
  338. HASHTABLESIZE :: 1 << MEMORY_USAGE
  339. HASH_SIZE_U32 :: 1 << HASHLOG /* required as macro for static allocation */
  340. stream_t_internal :: struct {
  341. hashTable: [HASH_SIZE_U32]u32,
  342. dictionary: [^]byte,
  343. dictCtx: ^stream_t_internal,
  344. currentOffset: u32,
  345. tableType: u32,
  346. dictSize: u32,
  347. /* Implicit padding to ensure structure is aligned */
  348. }
  349. STREAMDECODE_MINSIZE :: 32
  350. streamDecode_t :: struct #raw_union {
  351. minStateSize: [STREAMDECODE_MINSIZE]byte,
  352. internal_donotuse: streamDecode_t_internal,
  353. }
  354. streamDecode_t_internal :: struct {
  355. externalDict: [^]byte,
  356. prefixEnd: [^]byte,
  357. extDictSize: c.size_t,
  358. prefixSize: c.size_t,
  359. }
  360. ///////////////////
  361. // lz4hc
  362. CLEVEL_MIN :: 2
  363. CLEVEL_DEFAULT :: 9
  364. CLEVEL_OPT_MIN :: 10
  365. CLEVEL_MAX :: 12
  366. @(default_calling_convention="c", link_prefix="LZ4_")
  367. foreign lib {
  368. /*! LZ4_compress_HC() :
  369. * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm.
  370. * `dst` must be already allocated.
  371. * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
  372. * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
  373. * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work.
  374. * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
  375. * @return : the number of bytes written into 'dst'
  376. * or 0 if compression fails.
  377. */
  378. compress_HC :: proc(src, dst: [^]byte, srcSize, dstCapacity, compressionLevel: c.int) -> c.int ---
  379. /*! LZ4_compress_HC_extStateHC() :
  380. * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
  381. * `state` size is provided by LZ4_sizeofStateHC().
  382. * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly).
  383. */
  384. sizeofStateHC :: proc() -> c.int ---
  385. compress_HC_extStateHC :: proc(stateHC: rawptr, src, dst: [^]byte, srcSize, maxDstSize: c.int, compressionLevel: c.int) -> c.int ---
  386. /*! LZ4_compress_HC_destSize() : v1.9.0+
  387. * Will compress as much data as possible from `src`
  388. * to fit into `targetDstSize` budget.
  389. * Result is provided in 2 parts :
  390. * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
  391. * or 0 if compression fails.
  392. * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how much bytes were read from `src`
  393. */
  394. compress_HC_destSize :: proc(stateHC: rawptr, src, dst: [^]byte, srcSizePtr: ^c.int, targetDstSize: c.int, compressionLevel: c.int) -> c.int ---
  395. /*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
  396. * These functions create and release memory for LZ4 HC streaming state.
  397. * Newly created states are automatically initialized.
  398. * A same state can be used multiple times consecutively,
  399. * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
  400. */
  401. createStreamHC :: proc() -> ^streamHC_t ---
  402. freeStreamHC :: proc(streamHCPtr: ^streamHC_t) -> c.int ---
  403. resetStreamHC_fast :: proc(streamHCPtr: ^streamHC_t, compressionLevel: c.int) --- /* v1.9.0+ */
  404. loadDictHC :: proc(streamHCPtr: ^streamHC_t, dictionary: [^]byte, dictSize: c.int) -> c.int ---
  405. compress_HC_continue :: proc(streamHCPtr: ^streamHC_t, src, dst: [^]byte, srcSize, maxDstSize: c.int) -> c.int ---
  406. /*! LZ4_compress_HC_continue_destSize() : v1.9.0+
  407. * Similar to LZ4_compress_HC_continue(),
  408. * but will read as much data as possible from `src`
  409. * to fit into `targetDstSize` budget.
  410. * Result is provided into 2 parts :
  411. * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
  412. * or 0 if compression fails.
  413. * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how much bytes were read from `src`.
  414. * Note that this function may not consume the entire input.
  415. */
  416. compress_HC_continue_destSize:: proc(LZ4_streamHCPtr: ^streamHC_t, src, dst: [^]byte, srcSizePtr: ^c.int, targetDstSize: c.int) -> c.int ---
  417. saveDictHC :: proc(streamHCPtr: ^streamHC_t, safeBuffer: [^]byte, maxDictSize: c.int) -> c.int ---
  418. /*! LZ4_attach_HC_dictionary() : stable since v1.10.0
  419. * This API allows for the efficient re-use of a static dictionary many times.
  420. *
  421. * Rather than re-loading the dictionary buffer into a working context before
  422. * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
  423. * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
  424. * in which the working stream references the dictionary stream in-place.
  425. *
  426. * Several assumptions are made about the state of the dictionary stream.
  427. * Currently, only streams which have been prepared by LZ4_loadDictHC() should
  428. * be expected to work.
  429. *
  430. * Alternatively, the provided dictionary stream pointer may be NULL, in which
  431. * case any existing dictionary stream is unset.
  432. *
  433. * A dictionary should only be attached to a stream without any history (i.e.,
  434. * a stream that has just been reset).
  435. *
  436. * The dictionary will remain attached to the working stream only for the
  437. * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
  438. * dictionary context association from the working stream. The dictionary
  439. * stream (and source buffer) must remain in-place / accessible / unchanged
  440. * through the lifetime of the stream session.
  441. */
  442. attach_HC_dictionary :: proc(working_stream, dictionary_stream: ^streamHC_t) ---
  443. }
  444. HC_DICTIONARY_LOGSIZE :: 16
  445. HC_MAXD :: 1<<HC_DICTIONARY_LOGSIZE
  446. HC_MAXD_MASK :: HC_MAXD - 1
  447. HC_HASH_LOG :: 15
  448. HC_HASHTABLESIZE :: 1 << HC_HASH_LOG
  449. HC_HASH_MASK :: HC_HASHTABLESIZE - 1
  450. streamHC_internal_t :: struct {
  451. hashTable: [HC_HASHTABLESIZE]u32,
  452. chainTable: [HC_MAXD]u16,
  453. end: [^]byte, /* next block here to continue on current prefix */
  454. prefixStart: [^]byte, /* Indexes relative to this position */
  455. dictStart: [^]byte, /* alternate reference for extDict */
  456. dictLimit: u32, /* below that point, need extDict */
  457. lowLimit: u32, /* below that point, no more history */
  458. nextToUpdate: u32, /* index from which to continue dictionary update */
  459. compressionLevel: c.short,
  460. favorDecSpeed: i8, /* favor decompression speed if this flag set,
  461. otherwise, favor compression ratio */
  462. dirty: i8, /* stream has to be fully reset if this flag is set */
  463. dictCtx: ^streamHC_internal_t,
  464. }
  465. STREAMHC_MINSIZE :: 262200
  466. streamHC_t :: struct #raw_union {
  467. minStateSize: [STREAMHC_MINSIZE]byte,
  468. internal_donotuse: streamHC_internal_t,
  469. }