//+vet !using-param
package zlib

/*
	Copyright 2021 Jeroen van Rijn <[email protected]>.
	Made available under Odin's BSD-3 license.

	List of contributors:
		Jeroen van Rijn: Initial implementation, optimization.
		Ginger Bill:     Cosmetic changes.
*/

import "core:compress"
import "core:mem"
import "core:io"
import "core:hash"
import "core:bytes"
/*
	zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
	Returns: Error.
*/
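/*
	Example (a minimal sketch; `compressed` is a hypothetical ZLIB stream and
	error handling is abbreviated):

		import "core:bytes"
		import "core:compress/zlib"

		decompress :: proc(compressed: []u8) -> (output: []u8, err: zlib.Error) {
			buf: bytes.Buffer
			zlib.inflate(compressed, &buf) or_return
			return bytes.buffer_to_bytes(&buf), nil
		}
*/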
/*
	Do we do Adler32 as we write bytes to output?
	It used to be faster to do it inline, now it's faster to do it at the end of `inflate`.
	We'll see what's faster after more optimization, and might end up removing
	`Context.rolling_hash` if not inlining it is still faster.
*/
Compression_Method :: enum u8 {
	DEFLATE  = 8,
	Reserved = 15,
}

Compression_Level :: enum u8 {
	Fastest = 0,
	Fast    = 1,
	Default = 2,
	Maximum = 3,
}

Options :: struct {
	window_size: u16,
	level:       u8,
}

Error         :: compress.Error
General_Error :: compress.General_Error
ZLIB_Error    :: compress.ZLIB_Error
Deflate_Error :: compress.Deflate_Error

DEFLATE_MAX_CHUNK_SIZE   :: 65535
DEFLATE_MAX_LITERAL_SIZE :: 65535
DEFLATE_MAX_DISTANCE     :: 32768
DEFLATE_MAX_LENGTH       :: 258

HUFFMAN_MAX_BITS  :: 16
HUFFMAN_FAST_BITS :: 9
HUFFMAN_FAST_MASK :: ((1 << HUFFMAN_FAST_BITS) - 1)
Z_LENGTH_BASE := [31]u16{
	3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,
	67,83,99,115,131,163,195,227,258,0,0,
}

Z_LENGTH_EXTRA := [31]u8{
	0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,
}

Z_DIST_BASE := [32]u16{
	1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
	257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0,
}

Z_DIST_EXTRA := [32]u8{
	0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0,
}
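/*
	For example, a decoded length symbol of 265 indexes these tables at
	265 - 257 = 8: base length 11 plus 1 extra bit read from the stream,
	i.e. a match length of 11 or 12.
*/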
Z_LENGTH_DEZIGZAG := []u8{
	16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
}

Z_FIXED_LENGTH := [288]u8{
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
}

Z_FIXED_DIST := [32]u8{
	5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
}
/*
	Accelerate all cases in default tables.
*/
ZFAST_BITS :: 9
ZFAST_MASK :: ((1 << ZFAST_BITS) - 1)

/*
	ZLIB-style Huffman encoding.
	JPEG packs from left, ZLIB from right. We can't share code.
*/
Huffman_Table :: struct {
	fast:        [1 << ZFAST_BITS]u16,
	firstcode:   [17]u16,
	maxcode:     [17]int,
	firstsymbol: [17]u16,
	size:        [288]u8,
	value:       [288]u16,
}
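/*
	Each non-zero entry of `fast` packs (code_length << ZFAST_BITS) | symbol, so
	decode_huffman can resolve any code of up to 9 bits with one table lookup:
	`entry & 511` is the symbol and `entry >> ZFAST_BITS` the bits to consume.
	Longer codes take the slow path through firstcode/maxcode/firstsymbol.
*/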
// Implementation starts here
@(optimization_mode="speed")
z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
	assert(bits <= 16)
	// NOTE: Can optimize with llvm.bitreverse.i64 or some bit twiddling
	// by reversing all of the bits and masking out the unneeded ones.
	r = n
	r = ((r & 0xAAAA) >> 1) | ((r & 0x5555) << 1)
	r = ((r & 0xCCCC) >> 2) | ((r & 0x3333) << 2)
	r = ((r & 0xF0F0) >> 4) | ((r & 0x0F0F) << 4)
	r = ((r & 0xFF00) >> 8) | ((r & 0x00FF) << 8)
	r >>= (16 - bits)
	return
}
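/*
	For example, z_bit_reverse(0b1011, 4) yields 0b1101: the four swap steps
	reverse all 16 bits (0x000B becomes 0xD000), and the final shift by
	(16 - bits) moves the reversed code back into the low bits.
*/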
@(optimization_mode="speed")
grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
	/*
		That we get here at all means that we didn't pass an expected output size,
		or that it was too little.
	*/

	/*
		Double until we reach the maximum allowed.
	*/
	new_size := min(len(buf) << 1, compress.COMPRESS_OUTPUT_ALLOCATE_MAX)
	resize(buf, new_size)
	if len(buf) != new_size {
		/*
			Resize failed.
		*/
		return .Resize_Failed
	}
	return nil
}
/*
	TODO: Make these return compress.Error.
*/
@(optimization_mode="speed")
write_byte :: #force_inline proc(z: ^$C, c: u8) -> (err: io.Error) #no_bounds_check {
	/*
		Resize if needed.
	*/
	if int(z.bytes_written) + 1 >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		z.output.buf[z.bytes_written] = c
	}
	z.bytes_written += 1
	return .None
}
@(optimization_mode="speed")
repl_byte :: proc(z: ^$C, count: u16, c: u8) -> (err: io.Error) #no_bounds_check {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/

	/*
		Resize if needed.
	*/
	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			z.output.buf[z.bytes_written] = c
			z.bytes_written += 1
		}
	}
	return .None
}
@(optimization_mode="speed")
repl_bytes :: proc(z: ^$C, count: u16, distance: u16) -> (err: io.Error) {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/
	offset := i64(distance)

	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			c := z.output.buf[z.bytes_written - offset]
			z.output.buf[z.bytes_written] = c
			z.bytes_written += 1
		}
	}
	return .None
}
allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_Table, err: Error) {
	return new(Huffman_Table, allocator), nil
}
@(optimization_mode="speed")
build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
	sizes:     [HUFFMAN_MAX_BITS+1]int
	next_code: [HUFFMAN_MAX_BITS+1]int

	k := int(0)

	mem.zero_slice(sizes[:])
	mem.zero_slice(z.fast[:])

	for v in code_lengths {
		sizes[v] += 1
	}
	sizes[0] = 0

	for i in 1 ..< HUFFMAN_MAX_BITS {
		if sizes[i] > (1 << uint(i)) {
			return .Huffman_Bad_Sizes
		}
	}

	code := int(0)
	for i in 1 ..= HUFFMAN_MAX_BITS {
		next_code[i]     = code
		z.firstcode[i]   = u16(code)
		z.firstsymbol[i] = u16(k)
		code = code + sizes[i]
		if sizes[i] != 0 {
			if code - 1 >= (1 << u16(i)) {
				return .Huffman_Bad_Code_Lengths
			}
		}
		z.maxcode[i] = code << (HUFFMAN_MAX_BITS - uint(i))
		code <<= 1
		k += int(sizes[i])
	}

	z.maxcode[HUFFMAN_MAX_BITS] = 0x10000 // Sentinel
	c: int
	for v, ci in code_lengths {
		if v != 0 {
			c = next_code[v] - int(z.firstcode[v]) + int(z.firstsymbol[v])
			fastv := u16((u16(v) << 9) | u16(ci))
			z.size[c]  = u8(v)
			z.value[c] = u16(ci)
			if v <= ZFAST_BITS {
				j := z_bit_reverse(u16(next_code[v]), v)
				for j < (1 << ZFAST_BITS) {
					z.fast[j] = fastv
					j += (1 << v)
				}
			}
			next_code[v] += 1
		}
	}
	return nil
}
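/*
	build_huffman assigns canonical codes: within each bit length, codes are
	consecutive, and each length's first code continues from the previous
	length's last code, shifted left by one. For example (an illustrative set,
	not from the spec), the lengths {A: 2, B: 1, C: 3, D: 3} produce
	B = 0, A = 10, C = 110, D = 111.
*/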
@(optimization_mode="speed")
decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	code := u16(compress.peek_bits_lsb(z, 16))

	k := int(z_bit_reverse(code, 16))

	s: u8
	#no_bounds_check for s = HUFFMAN_FAST_BITS + 1; ; {
		if k < t.maxcode[s] {
			break
		}
		s += 1
	}
	if s >= 16 {
		return 0, .Bad_Huffman_Code
	}

	// Code size is s, so:
	b := (k >> (16 - s)) - int(t.firstcode[s]) + int(t.firstsymbol[s])
	if b >= size_of(t.size) {
		return 0, .Bad_Huffman_Code
	}
	if t.size[b] != s {
		return 0, .Bad_Huffman_Code
	}

	compress.consume_bits_lsb(z, s)

	r = t.value[b]
	return r, nil
}
@(optimization_mode="speed")
decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	if z.num_bits < 16 {
		if z.num_bits > 63 {
			return 0, .Code_Buffer_Malformed
		}
		compress.refill_lsb(z)
		if z.num_bits > 63 {
			return 0, .Stream_Too_Short
		}
	}
	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK]
	if b != 0 {
		s := u8(b >> ZFAST_BITS)
		compress.consume_bits_lsb(z, s)
		return b & 511, nil
	}
	return decode_huffman_slowpath(z, t)
}
@(optimization_mode="speed")
parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
	#no_bounds_check for {
		value, e := decode_huffman(z, z_repeat)
		if e != nil {
			// Propagate the decode error; `err` is still nil here.
			return e
		}
		if value < 256 {
			e := write_byte(z, u8(value))
			if e != .None {
				return .Output_Too_Short
			}
		} else {
			if value == 256 {
				// End of block
				return nil
			}

			value -= 257
			length := Z_LENGTH_BASE[value]
			if Z_LENGTH_EXTRA[value] > 0 {
				length += u16(compress.read_bits_lsb(z, Z_LENGTH_EXTRA[value]))
			}

			value, e = decode_huffman(z, z_offset)
			if e != nil {
				return .Bad_Huffman_Code
			}

			distance := Z_DIST_BASE[value]
			if Z_DIST_EXTRA[value] > 0 {
				distance += u16(compress.read_bits_lsb(z, Z_DIST_EXTRA[value]))
			}

			if z.bytes_written < i64(distance) {
				// Distance is longer than we've decoded so far.
				return .Bad_Distance
			}
			/*
				These might be sped up with a repl_byte call that copies
				from the already written output more directly, and that
				updates the Adler checksum once afterwards.
				That way we'd suffer less Stream vtable overhead.
			*/
			if distance == 1 {
				/*
					Replicate the last outputted byte, length times.
				*/
				if length > 0 {
					c := z.output.buf[z.bytes_written - i64(distance)]
					e := repl_byte(z, length, c)
					if e != .None {
						return .Output_Too_Short
					}
				}
			} else {
				if length > 0 {
					e := repl_bytes(z, length, distance)
					if e != .None {
						return .Output_Too_Short
					}
				}
			}
		}
	}
}
@(optimization_mode="speed")
inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	/*
		ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.

		`raw` determines whether the ZLIB header is processed, or we're inflating a raw
		DEFLATE stream.
	*/
	if !raw {
		size, size_err := compress.input_size(ctx)
		if size < 6 || size_err != nil {
			return .Stream_Too_Short
		}

		cmf, _ := compress.read_u8(ctx)

		method := Compression_Method(cmf & 0xf)
		if method != .DEFLATE {
			return .Unknown_Compression_Method
		}

		if cinfo := (cmf >> 4) & 0xf; cinfo > 7 {
			return .Unsupported_Window_Size
		}
		flg, _ := compress.read_u8(ctx)

		/*
			FCHECK: the header bytes, viewed as the big-endian value CMF*256 + FLG,
			must be a multiple of 31 (RFC 1950). `cmf` has to be widened before the
			shift, or the high byte would be lost.
		*/
		if (u16(cmf) << 8 | u16(flg)) % 31 != 0 {
			return .Checksum_Failed
		}
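		/*
			For example, the common ZLIB header 0x78 0x9C passes this check:
			0x789C = 30876 = 31 * 996.
		*/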
		/*
			We don't handle built-in dictionaries for now.
			They're application specific and PNG doesn't use them.
		*/
		if fdict := (flg >> 5) & 1; fdict != 0 {
			return .FDICT_Unsupported
		}
		// flevel := Compression_Level((flg >> 6) & 3);

		/*
			Inflate can consume bits belonging to the Adler checksum.
			We pass the entire stream to Inflate and will unget bytes if we need to
			at the end to compare checksums.
		*/
	}
	// Parse ZLIB stream without header.
	inflate_raw(ctx, expected_output_size=expected_output_size) or_return

	if !raw {
		compress.discard_to_next_byte_lsb(ctx)

		adler_b: [4]u8
		for _, i in adler_b {
			adler_b[i], _ = compress.read_u8_prefer_code_buffer_lsb(ctx)
		}
		adler := transmute(u32be)adler_b

		output_hash := hash.adler32(ctx.output.buf[:])
		if output_hash != u32(adler) {
			return .Checksum_Failed
		}
	}
	return nil
}
// TODO: Check alignment of reserve/resize.
@(optimization_mode="speed")
inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	context.allocator = allocator
	expected_output_size := expected_output_size

	/*
		Always set up a minimum allocation size.
	*/
	expected_output_size = max(max(expected_output_size, compress.COMPRESS_OUTPUT_ALLOCATE_MIN), 512)
	// fmt.printf("\nZLIB: Expected Payload Size: %v\n\n", expected_output_size);

	if expected_output_size > 0 && expected_output_size <= compress.COMPRESS_OUTPUT_ALLOCATE_MAX {
		/*
			Try to pre-allocate the output buffer.
		*/
		reserve(&z.output.buf, expected_output_size)
		resize (&z.output.buf, expected_output_size)
	}
	if len(z.output.buf) != expected_output_size {
		return .Resize_Failed
	}

	z.num_bits    = 0
	z.code_buffer = 0

	z_repeat:      ^Huffman_Table
	z_offset:      ^Huffman_Table
	codelength_ht: ^Huffman_Table

	defer free(z_repeat)
	defer free(z_offset)
	defer free(codelength_ht)

	z_repeat      = allocate_huffman_table() or_return
	z_offset      = allocate_huffman_table() or_return
	codelength_ht = allocate_huffman_table() or_return

	final := u32(0)
	type  := u32(0)

	for {
		final = compress.read_bits_lsb(z, 1)
		type  = compress.read_bits_lsb(z, 2)
		// fmt.printf("Final: %v | Type: %v\n", final, type)
		switch type {
		case 0:
			// fmt.printf("Method 0: STORED\n")
			// Uncompressed block
			// Discard bits until next byte boundary
			compress.discard_to_next_byte_lsb(z)

			uncompressed_len := u16(compress.read_bits_lsb(z, 16))
			length_check     := u16(compress.read_bits_lsb(z, 16))
			// fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check)

			if ~uncompressed_len != length_check {
				return .Len_Nlen_Mismatch
			}
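			/*
				NLEN is the one's complement of LEN, so e.g. LEN = 0x1234 must be
				followed by NLEN = 0xEDCB for the block to be well-formed.
			*/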
			/*
				TODO: Maybe speed this up with a stream-to-stream copy (read_from)
				and a single Adler32 update after.
			*/
			#no_bounds_check for uncompressed_len > 0 {
				compress.refill_lsb(z)
				lit := compress.read_bits_lsb(z, 8)
				write_byte(z, u8(lit))
				uncompressed_len -= 1
			}
			assert(uncompressed_len == 0)
		case 3:
			return .BType_3
		case:
			// fmt.printf("Err: %v | Final: %v | Type: %v\n", err, final, type)
			if type == 1 {
				// Use fixed code lengths.
				build_huffman(z_repeat, Z_FIXED_LENGTH[:]) or_return
				build_huffman(z_offset, Z_FIXED_DIST[:])   or_return
			} else {
				lencodes: [286+32+137]u8
				codelength_sizes: [19]u8

				//i: u32;
				n: u32

				compress.refill_lsb(z, 14)
				hlit  := compress.read_bits_no_refill_lsb(z, 5) + 257
				hdist := compress.read_bits_no_refill_lsb(z, 5) + 1
				hclen := compress.read_bits_no_refill_lsb(z, 4) + 4
				ntot  := hlit + hdist
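				/*
					Per RFC 1951: HLIT is the number of literal/length codes minus 257,
					HDIST the number of distance codes minus 1, and HCLEN the number of
					code length codes minus 4, sent in Z_LENGTH_DEZIGZAG order.
				*/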
				#no_bounds_check for i in 0..<hclen {
					s := compress.read_bits_lsb(z, 3)
					codelength_sizes[Z_LENGTH_DEZIGZAG[i]] = u8(s)
				}
				build_huffman(codelength_ht, codelength_sizes[:]) or_return

				n = 0
				c: u16
				for n < ntot {
					c = decode_huffman(z, codelength_ht) or_return
					if c >= 19 { // `c` is unsigned, so only the upper bound needs checking.
						return .Huffman_Bad_Code_Lengths
					}
					if c < 16 {
						lencodes[n] = u8(c)
						n += 1
					} else {
						fill := u8(0)
						compress.refill_lsb(z, 7)
						switch c {
						case 16:
							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3)
							if n == 0 {
								return .Huffman_Bad_Code_Lengths
							}
							fill = lencodes[n - 1]
						case 17:
							c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3)
						case 18:
							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11)
						case:
							return .Huffman_Bad_Code_Lengths
						}

						if ntot - n < u32(c) {
							return .Huffman_Bad_Code_Lengths
						}

						nc := n + u32(c)
						#no_bounds_check for ; n < nc; n += 1 {
							lencodes[n] = fill
						}
					}
				}
				if n != ntot {
					return .Huffman_Bad_Code_Lengths
				}
				build_huffman(z_repeat, lencodes[:hlit])     or_return
				build_huffman(z_offset, lencodes[hlit:ntot]) or_return
			}
			parse_huffman_block(z, z_repeat, z_offset) or_return
		}
		if final == 1 {
			break
		}
	}

	if int(z.bytes_written) != len(z.output.buf) {
		resize(&z.output.buf, int(z.bytes_written))
	}
	return nil
}
inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	ctx := compress.Context_Memory_Input{}

	ctx.input_data = input
	ctx.output     = buf

	return inflate_from_context(&ctx, raw=raw, expected_output_size=expected_output_size)
}

inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	ctx := compress.Context_Memory_Input{}

	ctx.input_data = input
	ctx.output     = buf

	return inflate_raw(&ctx, expected_output_size=expected_output_size)
}

inflate :: proc{inflate_from_context, inflate_from_byte_array}
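/*
	`inflate` is a proc group: it dispatches to inflate_from_context for a
	^compress.Context_Memory_Input and to inflate_from_byte_array for a []u8.
	For a raw DEFLATE stream without the ZLIB header and Adler32 trailer, call
	the raw variant directly (a sketch; `deflate_data` is hypothetical):

		buf: bytes.Buffer
		err := inflate_from_byte_array_raw(deflate_data, &buf)
*/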