#+vet !using-param
package compress_zlib

/*
	Copyright 2021 Jeroen van Rijn <[email protected]>.
	Made available under Odin's BSD-3 license.

	List of contributors:
		Jeroen van Rijn: Initial implementation, optimization.
		Ginger Bill:     Cosmetic changes.
*/

import "core:compress"
import "base:intrinsics"
import "core:mem"
import "core:io"
import "core:hash"
import "core:bytes"

/*
	zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
	Returns: Error.
*/
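/*
	Example usage (a minimal sketch; `data` and `decompress` are hypothetical
	names, assuming `data` holds a valid ZLIB stream):

		import "core:bytes"
		import "core:compress/zlib"

		decompress :: proc(data: []u8) -> (output: []u8, err: zlib.Error) {
			buf: bytes.Buffer
			zlib.inflate(data, &buf) or_return
			return bytes.buffer_to_bytes(&buf), nil
		}
*/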
/*
	Do we do Adler32 as we write bytes to output?
	It used to be faster to do it inline, now it's faster to do it at the end of `inflate`.
	We'll see what's faster after more optimization, and might end up removing
	`Context.rolling_hash` if not inlining it is still faster.
*/

Compression_Method :: enum u8 {
	DEFLATE  = 8,
	Reserved = 15,
}

Compression_Level :: enum u8 {
	Fastest = 0,
	Fast    = 1,
	Default = 2,
	Maximum = 3,
}

Options :: struct {
	window_size: u16,
	level:       u8,
}

Error         :: compress.Error
General_Error :: compress.General_Error
ZLIB_Error    :: compress.ZLIB_Error
Deflate_Error :: compress.Deflate_Error

DEFLATE_MAX_CHUNK_SIZE   :: 65535
DEFLATE_MAX_LITERAL_SIZE :: 65535
DEFLATE_MAX_DISTANCE     :: 32768
DEFLATE_MAX_LENGTH       :: 258

HUFFMAN_MAX_BITS  :: 16
HUFFMAN_FAST_BITS :: 9
HUFFMAN_FAST_MASK :: ((1 << HUFFMAN_FAST_BITS) - 1)

Z_LENGTH_BASE := [31]u16{
	3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,
	67,83,99,115,131,163,195,227,258,0,0,
}

Z_LENGTH_EXTRA := [31]u8{
	0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,
}

Z_DIST_BASE := [32]u16{
	1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
	257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0,
}

Z_DIST_EXTRA := [32]u8{
	0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0,
}
Z_LENGTH_DEZIGZAG := []u8{
	16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
}

Z_FIXED_LENGTH := [288]u8{
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
}

Z_FIXED_DIST := [32]u8{
	5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
}

/*
	Accelerate all cases in default tables.
*/
ZFAST_BITS :: 9
ZFAST_MASK :: ((1 << ZFAST_BITS) - 1)

/*
	ZLIB-style Huffman encoding.
	JPEG packs from left, ZLIB from right. We can't share code.
*/
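/*
	Illustration: DEFLATE fills each byte starting at its least significant bit,
	and writes each Huffman code starting with the code's most significant bit.
	A 3-bit code 0b110 therefore occupies bits 0..2 of the output byte in
	reversed bit order. This is why the decoder below peeks at the LSB side of
	the bit buffer and bit-reverses codes before comparing them.
*/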
Huffman_Table :: struct {
	fast:        [1 << ZFAST_BITS]u16,
	firstcode:   [17]u16,
	maxcode:     [17]int,
	firstsymbol: [17]u16,
	size:        [288]u8,
	value:       [288]u16,
}

// Implementation starts here
@(optimization_mode="favor_size")
z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
	assert(bits <= 16)
	r = intrinsics.reverse_bits(n)
	r >>= (16 - bits)
	return
}
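/*
	Example: z_bit_reverse(0b1011, 4) reverses the low 4 bits, yielding 0b1101.
	intrinsics.reverse_bits mirrors all 16 bits; the shift then drops the
	16 - bits low zeroes the mirroring leaves behind.
*/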
@(optimization_mode="favor_size")
grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
	/*
		That we get here at all means that we didn't pass an expected output size,
		or that it was too small.
	*/

	/*
		Double until we reach the maximum allowed.
	*/
	new_size := min(len(buf) << 1, compress.COMPRESS_OUTPUT_ALLOCATE_MAX)
	return resize(buf, new_size)
}

/*
	TODO: Make these return compress.Error.
*/
@(optimization_mode="favor_size")
write_byte :: #force_inline proc(z: ^$C, c: u8) -> (err: io.Error) #no_bounds_check {
	/*
		Resize if needed.
	*/
	if int(z.bytes_written) + 1 >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		z.output.buf[z.bytes_written] = c
	}
	z.bytes_written += 1
	return .None
}

@(optimization_mode="favor_size")
repl_byte :: proc(z: ^$C, count: u16, c: u8) -> (err: io.Error) #no_bounds_check {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/

	/*
		Resize if needed.
	*/
	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			z.output.buf[z.bytes_written] = c
			z.bytes_written += 1
		}
	}
	return .None
}

@(optimization_mode="favor_size")
repl_bytes :: proc(z: ^$C, count: u16, distance: u16) -> (err: io.Error) {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/
	offset := i64(distance)

	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			c := z.output.buf[z.bytes_written - offset]
			z.output.buf[z.bytes_written] = c
			z.bytes_written += 1
		}
	}
	return .None
}
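/*
	Note: repl_bytes copies one byte at a time, so an overlapping match
	(distance < count) correctly re-reads bytes it has just written, which is
	how DEFLATE encodes repeated runs; a bulk memcpy would not preserve this.
*/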
allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_Table, err: Error) {
	return new(Huffman_Table, allocator), nil
}

@(optimization_mode="favor_size")
build_huffman :: #force_no_inline proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
	sizes:     [HUFFMAN_MAX_BITS+1]int
	next_code: [HUFFMAN_MAX_BITS+1]int

	k := int(0)

	mem.zero_slice(sizes[:])
	mem.zero_slice(z.fast[:])

	for v in code_lengths {
		sizes[v] += 1
	}
	sizes[0] = 0

	for i in 1 ..< HUFFMAN_MAX_BITS {
		if sizes[i] > (1 << uint(i)) {
			return .Huffman_Bad_Sizes
		}
	}
	code := int(0)

	for i in 1 ..= HUFFMAN_MAX_BITS {
		next_code[i]     = code
		z.firstcode[i]   = u16(code)
		z.firstsymbol[i] = u16(k)
		code = code + sizes[i]

		if sizes[i] != 0 {
			if code - 1 >= (1 << u16(i)) {
				return .Huffman_Bad_Code_Lengths
			}
		}
		z.maxcode[i] = code << (HUFFMAN_MAX_BITS - uint(i))
		code <<= 1
		k += int(sizes[i])
	}
	z.maxcode[HUFFMAN_MAX_BITS] = 0x10000 // Sentinel

	c: int
	for v, ci in code_lengths {
		if v != 0 {
			c = next_code[v] - int(z.firstcode[v]) + int(z.firstsymbol[v])
			fastv := u16((u16(v) << 9) | u16(ci))
			z.size[c]  = u8(v)
			z.value[c] = u16(ci)
			if v <= ZFAST_BITS {
				j := z_bit_reverse(u16(next_code[v]), v)
				for j < (1 << ZFAST_BITS) {
					z.fast[j] = fastv
					j += (1 << v)
				}
			}
			next_code[v] += 1
		}
	}
	return nil
}
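/*
	Worked example of the canonical code assignment above: for code lengths
	{A: 2, B: 1, C: 3, D: 3}, codes are assigned in length order, shifting left
	by one between lengths:

		B (len 1): 0
		A (len 2): 10
		C (len 3): 110
		D (len 3): 111

	firstcode[n]/firstsymbol[n] record where each length's run starts, so a
	decoded code maps back to its symbol with simple arithmetic.
*/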
@(optimization_mode="favor_size")
decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	code := u16(compress.peek_bits_lsb(z, 16))

	k := int(z_bit_reverse(code, 16))

	s: u8 = HUFFMAN_FAST_BITS + 1
	for {
		#no_bounds_check if k < t.maxcode[s] {
			break
		}
		s += 1
	}
	if s >= 16 {
		return 0, .Bad_Huffman_Code
	}
	// Code size is s, so:
	b := (k >> (16 - s)) - int(t.firstcode[s]) + int(t.firstsymbol[s])
	if b >= size_of(t.size) {
		return 0, .Bad_Huffman_Code
	}
	if t.size[b] != s {
		return 0, .Bad_Huffman_Code
	}

	compress.consume_bits_lsb(z, s)

	r = t.value[b]
	return r, nil
}
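/*
	Fast path: the low ZFAST_BITS bits of the code buffer index `fast`, whose
	entries pack (code length << ZFAST_BITS) | symbol, as built in build_huffman.
	A zero entry means the code is longer than ZFAST_BITS and we fall back to
	the slow path above.
*/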
@(optimization_mode="favor_size")
decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	if z.num_bits < 16 {
		if z.num_bits > 63 {
			return 0, .Code_Buffer_Malformed
		}
		compress.refill_lsb(z)
		if z.num_bits > 63 {
			return 0, .Stream_Too_Short
		}
	}
	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK]
	if b != 0 {
		s := u8(b >> ZFAST_BITS)
		compress.consume_bits_lsb(z, s)
		return b & 511, nil
	}
	return decode_huffman_slowpath(z, t)
}
@(optimization_mode="favor_size")
parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
	#no_bounds_check for {
		value, e := decode_huffman(z, z_repeat)
		if e != nil {
			return e
		}
		if value < 256 {
			e := write_byte(z, u8(value))
			if e != .None {
				return .Output_Too_Short
			}
		} else {
			if value == 256 {
				// End of block
				return nil
			}

			value -= 257
			length := Z_LENGTH_BASE[value]
			if Z_LENGTH_EXTRA[value] > 0 {
				length += u16(compress.read_bits_lsb(z, Z_LENGTH_EXTRA[value]))
			}

			value, e = decode_huffman(z, z_offset)
			if e != nil {
				return .Bad_Huffman_Code
			}

			distance := Z_DIST_BASE[value]
			if Z_DIST_EXTRA[value] > 0 {
				distance += u16(compress.read_bits_lsb(z, Z_DIST_EXTRA[value]))
			}

			if z.bytes_written < i64(distance) {
				// Distance is longer than we've decoded so far.
				return .Bad_Distance
			}

			/*
				These might be sped up with a repl_byte call that copies
				from the already written output more directly, and that
				updates the Adler checksum once afterwards.
				That way we'd suffer less Stream vtable overhead.
			*/
			if distance == 1 {
				/*
					Replicate the last outputted byte, length times.
				*/
				if length > 0 {
					c := z.output.buf[z.bytes_written - i64(distance)]
					e := repl_byte(z, length, c)
					if e != .None {
						return .Output_Too_Short
					}
				}
			} else {
				if length > 0 {
					e := repl_bytes(z, length, distance)
					if e != .None {
						return .Output_Too_Short
					}
				}
			}
		}
	}
}
@(optimization_mode="favor_size")
inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	/*
		ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.

		raw determines whether the ZLIB header is processed, or we're inflating a raw
		DEFLATE stream.
	*/
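	/*
		ZLIB header layout (RFC 1950), for reference:

			CMF: bits 0..3 = CM (compression method), bits 4..7 = CINFO (log2(window size) - 8)
			FLG: bits 0..4 = FCHECK, bit 5 = FDICT, bits 6..7 = FLEVEL

		(CMF << 8 | FLG), read big-endian, must be a multiple of 31.
	*/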
	if !raw {
		size, size_err := compress.input_size(ctx)
		if size < 6 || size_err != nil {
			return .Stream_Too_Short
		}

		cmf, _ := compress.read_u8(ctx)
		method := Compression_Method(cmf & 0xf)
		if method != .DEFLATE {
			return .Unknown_Compression_Method
		}

		if cinfo := (cmf >> 4) & 0xf; cinfo > 7 {
			return .Unsupported_Window_Size
		}

		flg, _ := compress.read_u8(ctx)
		// FCHECK: the header is valid if (CMF << 8 | FLG) is a multiple of 31.
		if (u16(cmf) << 8 | u16(flg)) % 31 != 0 {
			return .Checksum_Failed
		}

		/*
			We don't handle built-in dictionaries for now.
			They're application specific and PNG doesn't use them.
		*/
		if fdict := (flg >> 5) & 1; fdict != 0 {
			return .FDICT_Unsupported
		}

		// flevel := Compression_Level((flg >> 6) & 3);

		/*
			Inflate can consume bits belonging to the Adler checksum.
			We pass the entire stream to Inflate and will unget bytes if we need to
			at the end to compare checksums.
		*/
	}

	// Parse ZLIB stream without header.
	inflate_raw(ctx, expected_output_size=expected_output_size) or_return

	if !raw {
		compress.discard_to_next_byte_lsb(ctx)

		adler_b: [4]u8
		for _, i in adler_b {
			adler_b[i], _ = compress.read_u8_prefer_code_buffer_lsb(ctx)
		}
		adler := transmute(u32be)adler_b

		output_hash := hash.adler32(ctx.output.buf[:])
		if output_hash != u32(adler) {
			return .Checksum_Failed
		}
	}
	return nil
}
// TODO: Check alignment of reserve/resize.
@(optimization_mode="favor_size")
inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	context.allocator = allocator
	expected_output_size := expected_output_size

	/*
		Always set up a minimum allocation size.
	*/
	expected_output_size = max(max(expected_output_size, compress.COMPRESS_OUTPUT_ALLOCATE_MIN), 512)

	// fmt.printf("\nZLIB: Expected Payload Size: %v\n\n", expected_output_size);

	if expected_output_size > 0 && expected_output_size <= compress.COMPRESS_OUTPUT_ALLOCATE_MAX {
		/*
			Try to pre-allocate the output buffer.
		*/
		reserve(&z.output.buf, expected_output_size) or_return
		resize (&z.output.buf, expected_output_size) or_return
	}
	if len(z.output.buf) != expected_output_size {
		return .Resize_Failed
	}

	z.num_bits    = 0
	z.code_buffer = 0

	z_repeat:      ^Huffman_Table
	z_offset:      ^Huffman_Table
	codelength_ht: ^Huffman_Table

	defer free(z_repeat)
	defer free(z_offset)
	defer free(codelength_ht)

	z_repeat      = allocate_huffman_table() or_return
	z_offset      = allocate_huffman_table() or_return
	codelength_ht = allocate_huffman_table() or_return

	final := u32(0)
	type  := u32(0)

	for {
		final = compress.read_bits_lsb(z, 1)
		type  = compress.read_bits_lsb(z, 2)

		// fmt.printf("Final: %v | Type: %v\n", final, type)

		switch type {
		case 0:
			// fmt.printf("Method 0: STORED\n")
			// Uncompressed block
			// Discard bits until next byte boundary
			compress.discard_to_next_byte_lsb(z)

			uncompressed_len := u16(compress.read_bits_lsb(z, 16))
			length_check     := u16(compress.read_bits_lsb(z, 16))

			// fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check)

			if ~uncompressed_len != length_check {
				return .Len_Nlen_Mismatch
			}

			/*
				TODO: Maybe speed this up with a stream-to-stream copy (read_from)
				and a single Adler32 update after.
			*/
			#no_bounds_check for uncompressed_len > 0 {
				compress.refill_lsb(z)
				lit := compress.read_bits_lsb(z, 8)
				write_byte(z, u8(lit))
				uncompressed_len -= 1
			}
			assert(uncompressed_len == 0)
		case 3:
			return .BType_3
		case:
			// fmt.printf("Err: %v | Final: %v | Type: %v\n", err, final, type)
			if type == 1 {
				// Use fixed code lengths.
				build_huffman(z_repeat, Z_FIXED_LENGTH[:]) or_return
				build_huffman(z_offset, Z_FIXED_DIST[:])   or_return
			} else {
				lencodes: [286+32+137]u8
				codelength_sizes: [19]u8

				n: u32

				compress.refill_lsb(z, 14)
				hlit  := compress.read_bits_no_refill_lsb(z, 5) + 257
				hdist := compress.read_bits_no_refill_lsb(z, 5) + 1
				hclen := compress.read_bits_no_refill_lsb(z, 4) + 4
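				/*
					Per RFC 1951: HLIT is the number of literal/length codes minus 257,
					HDIST the number of distance codes minus 1, and HCLEN the number of
					code-length codes minus 4, hence the offsets added above.
				*/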
				ntot := hlit + hdist

				#no_bounds_check for i in 0..<hclen {
					s := compress.read_bits_lsb(z, 3)
					codelength_sizes[Z_LENGTH_DEZIGZAG[i]] = u8(s)
				}
				build_huffman(codelength_ht, codelength_sizes[:]) or_return

				n = 0
				c: u16

				for n < ntot {
					c = decode_huffman(z, codelength_ht) or_return
					if c >= 19 {
						return .Huffman_Bad_Code_Lengths
					}
					if c < 16 {
						lencodes[n] = u8(c)
						n += 1
					} else {
						fill := u8(0)
						compress.refill_lsb(z, 7)
						switch c {
						case 16:
							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3)
							if n == 0 {
								return .Huffman_Bad_Code_Lengths
							}
							fill = lencodes[n - 1]
						case 17:
							c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3)
						case 18:
							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11)
						case:
							return .Huffman_Bad_Code_Lengths
						}

						if ntot - n < u32(c) {
							return .Huffman_Bad_Code_Lengths
						}

						nc := n + u32(c)
						#no_bounds_check for ; n < nc; n += 1 {
							lencodes[n] = fill
						}
					}
				}
				if n != ntot {
					return .Huffman_Bad_Code_Lengths
				}
				build_huffman(z_repeat, lencodes[:hlit])     or_return
				build_huffman(z_offset, lencodes[hlit:ntot]) or_return
			}
			parse_huffman_block(z, z_repeat, z_offset) or_return
		}
		if final == 1 {
			break
		}
	}
	if int(z.bytes_written) != len(z.output.buf) {
		resize(&z.output.buf, int(z.bytes_written)) or_return
	}
	return nil
}
inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	ctx := compress.Context_Memory_Input{}

	ctx.input_data = input
	ctx.output     = buf

	return inflate_from_context(&ctx, raw=raw, expected_output_size=expected_output_size)
}

inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	ctx := compress.Context_Memory_Input{}

	ctx.input_data = input
	ctx.output     = buf

	return inflate_raw(&ctx, expected_output_size=expected_output_size)
}

inflate :: proc{inflate_from_context, inflate_from_byte_array}
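/*
	Raw usage (a minimal sketch; `deflate_data` is a hypothetical name for a
	headerless DEFLATE stream, such as the payload of a gzip member):

		buf: bytes.Buffer
		zlib.inflate_from_byte_array_raw(deflate_data, &buf) or_return
*/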