package zlib

/*
	Copyright 2021 Jeroen van Rijn <[email protected]>.
	Made available under Odin's BSD-3 license.

	List of contributors:
		Jeroen van Rijn: Initial implementation, optimization.
		Ginger Bill:     Cosmetic changes.
*/

import "core:compress"
import "core:mem"
import "core:io"
import "core:hash"
import "core:bytes"
/*
	zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
	Returns: Error.
*/
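/*
	Example usage (a minimal sketch from the caller's side; `compressed` is a
	hypothetical []u8 holding a ZLIB stream):

		import "core:bytes"
		import "core:compress/zlib"

		decompress :: proc(compressed: []u8) {
			buf: bytes.Buffer
			defer bytes.buffer_destroy(&buf)

			if err := zlib.inflate(compressed, &buf); err != nil {
				return // Handle the decompression error as appropriate.
			}
			output := bytes.buffer_to_bytes(&buf)
			_ = output
		}
*/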
/*
	Do we compute Adler32 as we write bytes to the output?
	It used to be faster to do it inline; now it's faster to do it at the end of `inflate`.
	We'll see which wins after more optimization, and may end up removing
	`Context.rolling_hash` if computing the hash at the end remains faster.
*/
Compression_Method :: enum u8 {
	DEFLATE  = 8,
	Reserved = 15,
}

Compression_Level :: enum u8 {
	Fastest = 0,
	Fast    = 1,
	Default = 2,
	Maximum = 3,
}

Options :: struct {
	window_size: u16,
	level:       u8,
}

Error     :: compress.Error
E_General :: compress.General_Error
E_ZLIB    :: compress.ZLIB_Error
E_Deflate :: compress.Deflate_Error

DEFLATE_MAX_CHUNK_SIZE   :: 65535
DEFLATE_MAX_LITERAL_SIZE :: 65535
DEFLATE_MAX_DISTANCE     :: 32768
DEFLATE_MAX_LENGTH       :: 258

HUFFMAN_MAX_BITS  :: 16
HUFFMAN_FAST_BITS :: 9
HUFFMAN_FAST_MASK :: ((1 << HUFFMAN_FAST_BITS) - 1)
Z_LENGTH_BASE := [31]u16{
	3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,
	67,83,99,115,131,163,195,227,258,0,0,
}

Z_LENGTH_EXTRA := [31]u8{
	0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,
}

Z_DIST_BASE := [32]u16{
	1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
	257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0,
}

Z_DIST_EXTRA := [32]u8{
	0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0,
}

Z_LENGTH_DEZIGZAG := []u8{
	16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
}

Z_FIXED_LENGTH := [288]u8{
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
}

Z_FIXED_DIST := [32]u8{
	5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
}
/*
	Accelerate all cases in default tables.
*/
ZFAST_BITS :: 9
ZFAST_MASK :: ((1 << ZFAST_BITS) - 1)

/*
	ZLIB-style Huffman encoding.
	JPEG packs from left, ZLIB from right. We can't share code.
*/
Huffman_Table :: struct {
	fast:        [1 << ZFAST_BITS]u16,
	firstcode:   [16]u16,
	maxcode:     [17]int,
	firstsymbol: [16]u16,
	size:        [288]u8,
	value:       [288]u16,
}
// Implementation starts here.
@(optimization_mode="speed")
z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
	assert(bits <= 16)
	// NOTE: Can optimize with llvm.bitreverse.i64 or some bit twiddling
	// by reversing all of the bits and masking out the unneeded ones.
	r = n
	r = ((r & 0xAAAA) >> 1) | ((r & 0x5555) << 1)
	r = ((r & 0xCCCC) >> 2) | ((r & 0x3333) << 2)
	r = ((r & 0xF0F0) >> 4) | ((r & 0x0F0F) << 4)
	r = ((r & 0xFF00) >> 8) | ((r & 0x00FF) << 8)
	r >>= (16 - bits)
	return
}
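/*
	Worked example (illustrative): z_bit_reverse(0b1011, 4) == 0b1101.
	The four swap steps reverse all 16 bits (0x000B -> 0xD000), and the final
	shift keeps just the `bits` low bits of the reversed value (0xD000 >> 12 == 0xD).
	ZLIB needs this because Huffman codes are defined MSB-first, while the
	bit stream is consumed LSB-first.
*/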
@(optimization_mode="speed")
grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
	/*
		That we get here at all means that we didn't pass an expected output size,
		or that it was too small.

		Double the buffer size until we reach the maximum allowed.
	*/
	new_size := min(len(buf) << 1, compress.COMPRESS_OUTPUT_ALLOCATE_MAX)
	resize(buf, new_size)
	if len(buf) != new_size {
		// Resize failed.
		return .Resize_Failed
	}
	return nil
}
/*
	TODO: Make these return compress.Error.
*/
@(optimization_mode="speed")
write_byte :: #force_inline proc(z: ^$C, c: u8) -> (err: io.Error) #no_bounds_check {
	// Resize if needed.
	if int(z.bytes_written) + 1 >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		z.output.buf[z.bytes_written] = c
	}
	z.bytes_written += 1
	return .None
}
@(optimization_mode="speed")
repl_byte :: proc(z: ^$C, count: u16, c: u8) -> (err: io.Error) #no_bounds_check {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/
	// Resize if needed.
	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			z.output.buf[z.bytes_written] = c
			z.bytes_written += 1
		}
	}
	return .None
}
@(optimization_mode="speed")
repl_bytes :: proc(z: ^$C, count: u16, distance: u16) -> (err: io.Error) {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/
	offset := i64(distance)

	// Resize if needed.
	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			c := z.output.buf[z.bytes_written - offset]
			z.output.buf[z.bytes_written] = c
			z.bytes_written += 1
		}
	}
	return .None
}
allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_Table, err: Error) {
	return new(Huffman_Table, allocator), nil
}
@(optimization_mode="speed")
build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
	sizes:     [HUFFMAN_MAX_BITS+1]int
	next_code: [HUFFMAN_MAX_BITS]int
	k := int(0)

	mem.zero_slice(sizes[:])
	mem.zero_slice(z.fast[:])

	for v in code_lengths {
		sizes[v] += 1
	}
	sizes[0] = 0

	for i in 1..<(HUFFMAN_MAX_BITS+1) {
		if sizes[i] > (1 << uint(i)) {
			return E_Deflate.Huffman_Bad_Sizes
		}
	}

	code := int(0)
	for i in 1..<HUFFMAN_MAX_BITS {
		next_code[i]     = code
		z.firstcode[i]   = u16(code)
		z.firstsymbol[i] = u16(k)
		code = code + sizes[i]
		if sizes[i] != 0 {
			if code - 1 >= (1 << u16(i)) {
				return E_Deflate.Huffman_Bad_Code_Lengths
			}
		}
		z.maxcode[i] = code << (HUFFMAN_MAX_BITS - uint(i))
		code <<= 1
		k += int(sizes[i])
	}

	z.maxcode[HUFFMAN_MAX_BITS] = 0x10000 // Sentinel

	c: int
	for v, ci in code_lengths {
		if v != 0 {
			c = next_code[v] - int(z.firstcode[v]) + int(z.firstsymbol[v])
			fastv := u16((u16(v) << 9) | u16(ci))
			z.size[c]  = u8(v)
			z.value[c] = u16(ci)
			if v <= ZFAST_BITS {
				j := z_bit_reverse(u16(next_code[v]), v)
				for j < (1 << ZFAST_BITS) {
					z.fast[j] = fastv
					j += (1 << v)
				}
			}
			next_code[v] += 1
		}
	}
	return nil
}
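/*
	Illustrative example of the canonical code assignment above, for
	code lengths {2, 1, 3, 3} over symbols {A, B, C, D}:

		B (length 1): 0
		A (length 2): 10
		C (length 3): 110
		D (length 3): 111

	Each length class starts at `firstcode`, which is the previous class's
	end code shifted left by one bit; the codes are then stored bit-reversed
	because the DEFLATE bit stream is read LSB-first.
*/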
@(optimization_mode="speed")
decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	code := u16(compress.peek_bits_lsb(z, 16))

	k := int(z_bit_reverse(code, 16))

	s: u8
	#no_bounds_check for s = HUFFMAN_FAST_BITS+1; ; {
		if k < t.maxcode[s] {
			break
		}
		s += 1
	}
	if s >= 16 {
		return 0, E_Deflate.Bad_Huffman_Code
	}

	// Code size is s, so:
	b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s])
	if b >= size_of(t.size) {
		return 0, E_Deflate.Bad_Huffman_Code
	}
	if t.size[b] != s {
		return 0, E_Deflate.Bad_Huffman_Code
	}

	compress.consume_bits_lsb(z, s)

	r = t.value[b]
	return r, nil
}
@(optimization_mode="speed")
decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	if z.num_bits < 16 {
		if z.num_bits > 63 {
			return 0, E_ZLIB.Code_Buffer_Malformed
		}
		compress.refill_lsb(z)
		if z.num_bits > 63 {
			return 0, E_General.Stream_Too_Short
		}
	}
	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK]
	if b != 0 {
		s := u8(b >> ZFAST_BITS)
		compress.consume_bits_lsb(z, s)
		return b & 511, nil
	}
	return decode_huffman_slowpath(z, t)
}
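/*
	Fast-path note (illustrative): `build_huffman` packs each short code into
	`fast` as (code length << 9) | symbol. A symbol 42 with a 7-bit code is
	stored as (7 << 9) | 42, so `b >> ZFAST_BITS` recovers the number of bits
	to consume and `b & 511` the symbol. An entry of 0 means the code is not
	in the fast table and we fall back to the slow path.
*/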
@(optimization_mode="speed")
parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
	#no_bounds_check for {
		value, e := decode_huffman(z, z_repeat)
		if e != nil {
			return e
		}
		if value < 256 {
			e := write_byte(z, u8(value))
			if e != .None {
				return E_General.Output_Too_Short
			}
		} else {
			if value == 256 {
				// End of block
				return nil
			}

			value -= 257
			length := Z_LENGTH_BASE[value]
			if Z_LENGTH_EXTRA[value] > 0 {
				length += u16(compress.read_bits_lsb(z, Z_LENGTH_EXTRA[value]))
			}

			value, e = decode_huffman(z, z_offset)
			if e != nil {
				return E_Deflate.Bad_Huffman_Code
			}

			distance := Z_DIST_BASE[value]
			if Z_DIST_EXTRA[value] > 0 {
				distance += u16(compress.read_bits_lsb(z, Z_DIST_EXTRA[value]))
			}

			if z.bytes_written < i64(distance) {
				// Distance is longer than we've decoded so far.
				return E_Deflate.Bad_Distance
			}

			/*
				These might be sped up with a repl_byte call that copies
				from the already written output more directly, and that
				updates the Adler checksum once after.
				That way we'd suffer less Stream vtable overhead.
			*/
			if distance == 1 {
				// Replicate the last outputted byte, length times.
				if length > 0 {
					c := z.output.buf[z.bytes_written - i64(distance)]
					e := repl_byte(z, length, c)
					if e != .None {
						return E_General.Output_Too_Short
					}
				}
			} else {
				if length > 0 {
					e := repl_bytes(z, length, distance)
					if e != .None {
						return E_General.Output_Too_Short
					}
				}
			}
		}
	}
}
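/*
	Back-reference example (illustrative): after the decoder has written
	"abcd", a match with distance 4 and length 6 copies one byte at a time
	and may overlap its own output, appending "abcdab" for a total of
	"abcdabcdab". Distance 1 with length N is the degenerate run-length
	case handled by repl_byte above.
*/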
@(optimization_mode="speed")
inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	/*
		ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.

		`raw` determines whether the ZLIB header is processed, or whether we're inflating a raw
		DEFLATE stream.
	*/
	if !raw {
		size, size_err := compress.input_size(ctx)
		if size < 6 || size_err != nil {
			return E_General.Stream_Too_Short
		}

		cmf, _ := compress.read_u8(ctx)

		method := Compression_Method(cmf & 0xf)
		if method != .DEFLATE {
			return E_General.Unknown_Compression_Method
		}

		if cinfo := (cmf >> 4) & 0xf; cinfo > 7 {
			return E_ZLIB.Unsupported_Window_Size
		}

		flg, _ := compress.read_u8(ctx)

		// RFC 1950: FCHECK is chosen so that (CMF << 8 | FLG) is a multiple of 31.
		if (u16(cmf) << 8 | u16(flg)) % 31 != 0 {
			return E_General.Checksum_Failed
		}

		/*
			We don't handle built-in dictionaries for now.
			They're application specific and PNG doesn't use them.
		*/
		if fdict := (flg >> 5) & 1; fdict != 0 {
			return E_ZLIB.FDICT_Unsupported
		}

		// flevel := Compression_Level((flg >> 6) & 3);

		/*
			Inflate can consume bits belonging to the Adler checksum.
			We pass the entire stream to Inflate and will unget bytes if we need to
			at the end to compare checksums.
		*/
	}
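	/*
		Header example (illustrative): the common ZLIB header 0x78 0x9C decodes as
		CMF = 0x78 (method 8 = DEFLATE, CINFO 7 = 32 KiB window) and FLG = 0x9C
		(FDICT 0, FLEVEL 2 = default); (0x78 << 8 | 0x9C) = 30876 = 31 * 996,
		so the header check above passes.
	*/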
	// Parse ZLIB stream without header.
	inflate_raw(z=ctx, expected_output_size=expected_output_size) or_return

	if !raw {
		compress.discard_to_next_byte_lsb(ctx)

		adler_b: [4]u8
		for _, i in adler_b {
			adler_b[i], _ = compress.read_u8_prefer_code_buffer_lsb(ctx)
		}
		adler := transmute(u32be)adler_b

		output_hash := hash.adler32(ctx.output.buf[:])
		if output_hash != u32(adler) {
			return E_General.Checksum_Failed
		}
	}
	return nil
}
// TODO: Check alignment of reserve/resize.
@(optimization_mode="speed")
inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	context.allocator = allocator
	expected_output_size := expected_output_size

	/*
		Always set up a minimum allocation size.
	*/
	expected_output_size = max(max(expected_output_size, compress.COMPRESS_OUTPUT_ALLOCATE_MIN), 512)

	// fmt.printf("\nZLIB: Expected Payload Size: %v\n\n", expected_output_size);

	if expected_output_size > 0 && expected_output_size <= compress.COMPRESS_OUTPUT_ALLOCATE_MAX {
		/*
			Try to pre-allocate the output buffer.
		*/
		reserve(&z.output.buf, expected_output_size)
		resize (&z.output.buf, expected_output_size)
	}
	if len(z.output.buf) != expected_output_size {
		return .Resize_Failed
	}

	z.num_bits    = 0
	z.code_buffer = 0

	z_repeat:      ^Huffman_Table
	z_offset:      ^Huffman_Table
	codelength_ht: ^Huffman_Table

	defer free(z_repeat)
	defer free(z_offset)
	defer free(codelength_ht)

	z_repeat      = allocate_huffman_table() or_return
	z_offset      = allocate_huffman_table() or_return
	codelength_ht = allocate_huffman_table() or_return

	final := u32(0)
	type  := u32(0)

	for {
		final = compress.read_bits_lsb(z, 1)
		type  = compress.read_bits_lsb(z, 2)

		// fmt.printf("Final: %v | Type: %v\n", final, type);

		switch type {
		case 0:
			// Uncompressed block
			// Discard bits until next byte boundary
			compress.discard_to_next_byte_lsb(z)

			uncompressed_len := i16(compress.read_bits_lsb(z, 16))
			length_check     := i16(compress.read_bits_lsb(z, 16))

			// fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check);

			// NLEN must be the one's complement of LEN, e.g. LEN 0x0005 pairs with NLEN 0xFFFA.
			if ~uncompressed_len != length_check {
				return E_Deflate.Len_Nlen_Mismatch
			}

			/*
				TODO: Maybe speed this up with a stream-to-stream copy (read_from)
				and a single Adler32 update after.
			*/
			#no_bounds_check for uncompressed_len > 0 {
				compress.refill_lsb(z)
				lit := compress.read_bits_lsb(z, 8)
				write_byte(z, u8(lit))
				uncompressed_len -= 1
			}

		case 3:
			return E_Deflate.BType_3

		case:
			// log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
			if type == 1 {
				// Use fixed code lengths.
				build_huffman(z_repeat, Z_FIXED_LENGTH[:]) or_return
				build_huffman(z_offset, Z_FIXED_DIST[:])   or_return
			} else {
				lencodes: [286+32+137]u8
				codelength_sizes: [19]u8

				n: u32

				compress.refill_lsb(z, 14)
				hlit  := compress.read_bits_no_refill_lsb(z, 5) + 257
				hdist := compress.read_bits_no_refill_lsb(z, 5) + 1
				hclen := compress.read_bits_no_refill_lsb(z, 4) + 4
				ntot  := hlit + hdist

				#no_bounds_check for i in 0..<hclen {
					s := compress.read_bits_lsb(z, 3)
					codelength_sizes[Z_LENGTH_DEZIGZAG[i]] = u8(s)
				}
				build_huffman(codelength_ht, codelength_sizes[:]) or_return

				n = 0
				c: u16
				for n < ntot {
					c = decode_huffman(z, codelength_ht) or_return
					// `c` is unsigned, so only the upper bound needs checking.
					if c >= 19 {
						return E_Deflate.Huffman_Bad_Code_Lengths
					}
					if c < 16 {
						lencodes[n] = u8(c)
						n += 1
					} else {
						fill := u8(0)
						compress.refill_lsb(z, 7)
						switch c {
						case 16:
							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3)
							if n == 0 {
								return E_Deflate.Huffman_Bad_Code_Lengths
							}
							fill = lencodes[n - 1]
						case 17:
							c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3)
						case 18:
							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11)
						case:
							return E_Deflate.Huffman_Bad_Code_Lengths
						}

						if ntot - n < u32(c) {
							return E_Deflate.Huffman_Bad_Code_Lengths
						}

						nc := n + u32(c)
						#no_bounds_check for ; n < nc; n += 1 {
							lencodes[n] = fill
						}
					}
				}
				if n != ntot {
					return E_Deflate.Huffman_Bad_Code_Lengths
				}
				build_huffman(z_repeat, lencodes[:hlit])     or_return
				build_huffman(z_offset, lencodes[hlit:ntot]) or_return
			}
			parse_huffman_block(z, z_repeat, z_offset) or_return
		}
		if final == 1 {
			break
		}
	}

	if int(z.bytes_written) != len(z.output.buf) {
		resize(&z.output.buf, int(z.bytes_written))
	}
	return nil
}
inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	ctx := compress.Context_Memory_Input{}

	ctx.input_data = input
	ctx.output     = buf

	return inflate_from_context(ctx=&ctx, raw=raw, expected_output_size=expected_output_size)
}

inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	// `raw` is accepted for signature parity with inflate_from_byte_array,
	// but this procedure always treats the input as a raw DEFLATE stream.
	ctx := compress.Context_Memory_Input{}

	ctx.input_data = input
	ctx.output     = buf

	return inflate_raw(z=&ctx, expected_output_size=expected_output_size)
}

inflate :: proc{inflate_from_context, inflate_from_byte_array}
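/*
	Raw usage example (a minimal sketch; `deflate_data` is a hypothetical []u8
	holding a headerless DEFLATE stream):

		buf: bytes.Buffer
		defer bytes.buffer_destroy(&buf)
		err := zlib.inflate_from_byte_array_raw(deflate_data, &buf)
*/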