//+vet !using-param
package zlib

/*
	Copyright 2021 Jeroen van Rijn <[email protected]>.
	Made available under Odin's BSD-3 license.

	List of contributors:
		Jeroen van Rijn: Initial implementation, optimization.
		Ginger Bill:     Cosmetic changes.
*/

import "core:compress"
import "core:mem"
import "core:io"
import "core:hash"
import "core:bytes"

/*
	zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
	Returns: Error.
*/
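/*
	Example usage (an illustrative sketch, not part of this package; the caller
	and import path are assumptions):

		import "core:bytes"
		import "core:compress/zlib"

		decompress :: proc(compressed: []u8) -> (output: []u8, err: zlib.Error) {
			buf: bytes.Buffer
			zlib.inflate(compressed, &buf) or_return
			return bytes.buffer_to_bytes(&buf), nil
		}
*/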
/*
	Do we do Adler32 as we write bytes to output?
	It used to be faster to do it inline; now it's faster to do it at the end of `inflate`.
	We'll see which is faster after more optimization, and may end up removing
	`Context.rolling_hash` if not inlining it remains faster.
*/
Compression_Method :: enum u8 {
	DEFLATE  = 8,
	Reserved = 15,
}

Compression_Level :: enum u8 {
	Fastest = 0,
	Fast    = 1,
	Default = 2,
	Maximum = 3,
}

Options :: struct {
	window_size: u16,
	level:       u8,
}

Error         :: compress.Error
General_Error :: compress.General_Error
ZLIB_Error    :: compress.ZLIB_Error
Deflate_Error :: compress.Deflate_Error

DEFLATE_MAX_CHUNK_SIZE   :: 65535
DEFLATE_MAX_LITERAL_SIZE :: 65535
DEFLATE_MAX_DISTANCE     :: 32768
DEFLATE_MAX_LENGTH       :: 258

HUFFMAN_MAX_BITS  :: 16
HUFFMAN_FAST_BITS :: 9
HUFFMAN_FAST_MASK :: ((1 << HUFFMAN_FAST_BITS) - 1)
Z_LENGTH_BASE := [31]u16{
	3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,
	67,83,99,115,131,163,195,227,258,0,0,
}

Z_LENGTH_EXTRA := [31]u8{
	0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,
}

Z_DIST_BASE := [32]u16{
	1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
	257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0,
}

Z_DIST_EXTRA := [32]u8{
	0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0,
}

Z_LENGTH_DEZIGZAG := []u8{
	16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
}

Z_FIXED_LENGTH := [288]u8{
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
}

Z_FIXED_DIST := [32]u8{
	5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
}
/*
	Accelerate all cases in default tables.
*/
ZFAST_BITS :: 9
ZFAST_MASK :: ((1 << ZFAST_BITS) - 1)
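/*
	Each non-zero `fast` entry packs a code's bit length into the high bits and
	its symbol into the low 9 bits: `(length << ZFAST_BITS) | symbol`. That lets
	`decode_huffman` resolve any code of at most ZFAST_BITS bits with a single
	table lookup on the next 9 bits of the stream; only longer codes take the
	slow path.
*/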
/*
	ZLIB-style Huffman encoding.
	JPEG packs from left, ZLIB from right. We can't share code.
*/
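/*
	Concretely: DEFLATE fills each byte starting at its least-significant bit,
	while JPEG fills from the most-significant bit down. Huffman codes are
	nonetheless specified most-significant-code-bit first (RFC 1951, 3.1.1),
	which is why `z_bit_reverse` below is needed before indexing the tables.
*/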
Huffman_Table :: struct {
	fast:        [1 << ZFAST_BITS]u16,
	firstcode:   [17]u16,
	maxcode:     [17]int,
	firstsymbol: [17]u16,
	size:        [288]u8,
	value:       [288]u16,
}
// Implementation starts here.
@(optimization_mode="speed")
z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
	assert(bits <= 16)
	// NOTE: Can optimize with llvm.bitreverse.i64 or some bit twiddling
	// by reversing all of the bits and masking out the unneeded ones.
	r = n
	r = ((r & 0xAAAA) >> 1) | ((r & 0x5555) << 1)
	r = ((r & 0xCCCC) >> 2) | ((r & 0x3333) << 2)
	r = ((r & 0xF0F0) >> 4) | ((r & 0x0F0F) << 4)
	r = ((r & 0xFF00) >> 8) | ((r & 0x00FF) << 8)
	r >>= (16 - bits)
	return
}
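/*
	For example, `z_bit_reverse(0b1011, 4)` reverses the full 16-bit value to
	0xD000, then shifts right by 16 - 4 = 12, returning 0b1101.
*/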
@(optimization_mode="speed")
grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
	/*
		That we get here at all means that we didn't pass an expected output size,
		or that the size we passed was too small.

		Double the buffer size until we reach the maximum allowed.
	*/
	new_size := min(len(buf) << 1, compress.COMPRESS_OUTPUT_ALLOCATE_MAX)
	return resize(buf, new_size)
}
/*
	TODO: Make these return compress.Error.
*/
@(optimization_mode="speed")
write_byte :: #force_inline proc(z: ^$C, c: u8) -> (err: io.Error) #no_bounds_check {
	/*
		Resize if needed.
	*/
	if int(z.bytes_written) + 1 >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		z.output.buf[z.bytes_written] = c
	}
	z.bytes_written += 1
	return .None
}
@(optimization_mode="speed")
repl_byte :: proc(z: ^$C, count: u16, c: u8) -> (err: io.Error) #no_bounds_check {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/
	/*
		Resize if needed.
	*/
	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			z.output.buf[z.bytes_written] = c
			z.bytes_written += 1
		}
	}
	return .None
}
@(optimization_mode="speed")
repl_bytes :: proc(z: ^$C, count: u16, distance: u16) -> (err: io.Error) {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/
	offset := i64(distance)

	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf)
		if e != nil {
			return .Short_Write
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			c := z.output.buf[z.bytes_written - offset]
			z.output.buf[z.bytes_written] = c
			z.bytes_written += 1
		}
	}
	return .None
}
allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_Table, err: Error) {
	return new(Huffman_Table, allocator), nil
}
@(optimization_mode="speed")
build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
	sizes:     [HUFFMAN_MAX_BITS+1]int
	next_code: [HUFFMAN_MAX_BITS+1]int
	k := int(0)

	mem.zero_slice(sizes[:])
	mem.zero_slice(z.fast[:])

	for v in code_lengths {
		sizes[v] += 1
	}
	sizes[0] = 0

	for i in 1 ..< HUFFMAN_MAX_BITS {
		if sizes[i] > (1 << uint(i)) {
			return .Huffman_Bad_Sizes
		}
	}
	code := int(0)
	for i in 1 ..= HUFFMAN_MAX_BITS {
		next_code[i]     = code
		z.firstcode[i]   = u16(code)
		z.firstsymbol[i] = u16(k)
		code = code + sizes[i]
		if sizes[i] != 0 {
			if code - 1 >= (1 << u16(i)) {
				return .Huffman_Bad_Code_Lengths
			}
		}
		z.maxcode[i] = code << (HUFFMAN_MAX_BITS - uint(i))
		code <<= 1
		k += int(sizes[i])
	}
	z.maxcode[HUFFMAN_MAX_BITS] = 0x10000 // Sentinel

	c: int
	for v, ci in code_lengths {
		if v != 0 {
			c = next_code[v] - int(z.firstcode[v]) + int(z.firstsymbol[v])
			fastv := u16((u16(v) << 9) | u16(ci))
			z.size[c]  = u8(v)
			z.value[c] = u16(ci)
			if v <= ZFAST_BITS {
				j := z_bit_reverse(u16(next_code[v]), v)
				for j < (1 << ZFAST_BITS) {
					z.fast[j] = fastv
					j += (1 << v)
				}
			}
			next_code[v] += 1
		}
	}
	return nil
}
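/*
	For intuition, the canonical code construction above matches RFC 1951,
	3.2.2. For example, the alphabet ABCD with code lengths (2, 1, 3, 3)
	yields:

		Symbol  Length  Code
		A       2       10
		B       1       0
		C       3       110
		D       3       111

	Codes of each length are assigned consecutively, starting from the
	`next_code` value carried over from the previous (shorter) length, doubled.
*/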
@(optimization_mode="speed")
decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	code := u16(compress.peek_bits_lsb(z, 16))

	k := int(z_bit_reverse(code, 16))

	s: u8 = HUFFMAN_FAST_BITS + 1
	for {
		#no_bounds_check if k < t.maxcode[s] {
			break
		}
		s += 1
	}
	if s >= 16 {
		return 0, .Bad_Huffman_Code
	}
	// code size is s, so:
	b := (k >> (16 - s)) - int(t.firstcode[s]) + int(t.firstsymbol[s])
	if b >= size_of(t.size) {
		return 0, .Bad_Huffman_Code
	}
	if t.size[b] != s {
		return 0, .Bad_Huffman_Code
	}
	compress.consume_bits_lsb(z, s)

	r = t.value[b]
	return r, nil
}
@(optimization_mode="speed")
decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	if z.num_bits < 16 {
		if z.num_bits > 63 {
			return 0, .Code_Buffer_Malformed
		}
		compress.refill_lsb(z)
		if z.num_bits > 63 {
			return 0, .Stream_Too_Short
		}
	}
	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK]
	if b != 0 {
		s := u8(b >> ZFAST_BITS)
		compress.consume_bits_lsb(z, s)
		return b & 511, nil
	}
	return decode_huffman_slowpath(z, t)
}
@(optimization_mode="speed")
parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
	#no_bounds_check for {
		value, e := decode_huffman(z, z_repeat)
		if e != nil {
			return e
		}
		if value < 256 {
			e := write_byte(z, u8(value))
			if e != .None {
				return .Output_Too_Short
			}
		} else {
			if value == 256 {
				// End of block
				return nil
			}
			value -= 257
			length := Z_LENGTH_BASE[value]
			if Z_LENGTH_EXTRA[value] > 0 {
				length += u16(compress.read_bits_lsb(z, Z_LENGTH_EXTRA[value]))
			}
			value, e = decode_huffman(z, z_offset)
			if e != nil {
				return .Bad_Huffman_Code
			}
			distance := Z_DIST_BASE[value]
			if Z_DIST_EXTRA[value] > 0 {
				distance += u16(compress.read_bits_lsb(z, Z_DIST_EXTRA[value]))
			}
			if z.bytes_written < i64(distance) {
				// Distance is longer than we've decoded so far.
				return .Bad_Distance
			}
			/*
				These might be sped up with a repl_byte call that copies
				from the already written output more directly, and that
				updates the Adler checksum once after.
				That way we'd suffer less Stream vtable overhead.
			*/
			if distance == 1 {
				/*
					Replicate the last outputted byte, length times.
				*/
				if length > 0 {
					c := z.output.buf[z.bytes_written - i64(distance)]
					e := repl_byte(z, length, c)
					if e != .None {
						return .Output_Too_Short
					}
				}
			} else {
				if length > 0 {
					e := repl_bytes(z, length, distance)
					if e != .None {
						return .Output_Too_Short
					}
				}
			}
		}
	}
}
@(optimization_mode="speed")
inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	/*
		ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.

		`raw` determines whether the ZLIB header is processed, or whether we're inflating
		a raw DEFLATE stream.
	*/
	if !raw {
		size, size_err := compress.input_size(ctx)
		if size < 6 || size_err != nil {
			return .Stream_Too_Short
		}
		cmf, _ := compress.read_u8(ctx)

		method := Compression_Method(cmf & 0xf)
		if method != .DEFLATE {
			return .Unknown_Compression_Method
		}
		if cinfo := (cmf >> 4) & 0xf; cinfo > 7 {
			return .Unsupported_Window_Size
		}
		flg, _ := compress.read_u8(ctx)
		/*
			FCHECK (the low 5 bits of FLG) must be chosen such that
			(CMF << 8 | FLG) is a multiple of 31.
		*/
		if ((int(cmf) << 8) | int(flg)) % 31 != 0 {
			return .Checksum_Failed
		}
		/*
			We don't handle built-in dictionaries for now.
			They're application specific and PNG doesn't use them.
		*/
		if fdict := (flg >> 5) & 1; fdict != 0 {
			return .FDICT_Unsupported
		}
		// flevel := Compression_Level((flg >> 6) & 3);
		/*
			Inflate can consume bits belonging to the Adler checksum.
			We pass the entire stream to Inflate and will unget bytes if we need to
			at the end to compare checksums.
		*/
	}
	// Parse ZLIB stream without header.
	inflate_raw(ctx, expected_output_size=expected_output_size) or_return

	if !raw {
		compress.discard_to_next_byte_lsb(ctx)
		adler_b: [4]u8
		for _, i in adler_b {
			adler_b[i], _ = compress.read_u8_prefer_code_buffer_lsb(ctx)
		}
		adler := transmute(u32be)adler_b

		output_hash := hash.adler32(ctx.output.buf[:])
		if output_hash != u32(adler) {
			return .Checksum_Failed
		}
	}
	return nil
}
// TODO: Check alignment of reserve/resize.
@(optimization_mode="speed")
inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	context.allocator = allocator
	expected_output_size := expected_output_size
	/*
		Always set up a minimum allocation size.
	*/
	expected_output_size = max(max(expected_output_size, compress.COMPRESS_OUTPUT_ALLOCATE_MIN), 512)
	// fmt.printf("\nZLIB: Expected Payload Size: %v\n\n", expected_output_size);
	if expected_output_size > 0 && expected_output_size <= compress.COMPRESS_OUTPUT_ALLOCATE_MAX {
		/*
			Try to pre-allocate the output buffer.
		*/
		reserve(&z.output.buf, expected_output_size) or_return
		resize (&z.output.buf, expected_output_size) or_return
	}
	if len(z.output.buf) != expected_output_size {
		return .Resize_Failed
	}

	z.num_bits    = 0
	z.code_buffer = 0

	z_repeat:      ^Huffman_Table
	z_offset:      ^Huffman_Table
	codelength_ht: ^Huffman_Table
	defer free(z_repeat)
	defer free(z_offset)
	defer free(codelength_ht)

	z_repeat      = allocate_huffman_table() or_return
	z_offset      = allocate_huffman_table() or_return
	codelength_ht = allocate_huffman_table() or_return

	final := u32(0)
	type  := u32(0)
	for {
		final = compress.read_bits_lsb(z, 1)
		type  = compress.read_bits_lsb(z, 2)
		// fmt.printf("Final: %v | Type: %v\n", final, type)
		switch type {
		case 0:
			// fmt.printf("Method 0: STORED\n")
			// Uncompressed block
			// Discard bits until next byte boundary
			compress.discard_to_next_byte_lsb(z)

			uncompressed_len := u16(compress.read_bits_lsb(z, 16))
			length_check     := u16(compress.read_bits_lsb(z, 16))
			// fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check)
			if ~uncompressed_len != length_check {
				return .Len_Nlen_Mismatch
			}
			/*
				TODO: Maybe speed this up with a stream-to-stream copy (read_from)
				and a single Adler32 update after.
			*/
			#no_bounds_check for uncompressed_len > 0 {
				compress.refill_lsb(z)
				lit := compress.read_bits_lsb(z, 8)
				write_byte(z, u8(lit))
				uncompressed_len -= 1
			}
			assert(uncompressed_len == 0)
		case 3:
			return .BType_3
		case:
			// fmt.printf("Err: %v | Final: %v | Type: %v\n", err, final, type)
			if type == 1 {
				// Use fixed code lengths.
				build_huffman(z_repeat, Z_FIXED_LENGTH[:]) or_return
				build_huffman(z_offset, Z_FIXED_DIST[:])   or_return
			} else {
				lencodes: [286+32+137]u8
				codelength_sizes: [19]u8
				n: u32

				compress.refill_lsb(z, 14)
				hlit  := compress.read_bits_no_refill_lsb(z, 5) + 257
				hdist := compress.read_bits_no_refill_lsb(z, 5) + 1
				hclen := compress.read_bits_no_refill_lsb(z, 4) + 4
				ntot  := hlit + hdist

				#no_bounds_check for i in 0..<hclen {
					s := compress.read_bits_lsb(z, 3)
					codelength_sizes[Z_LENGTH_DEZIGZAG[i]] = u8(s)
				}
				build_huffman(codelength_ht, codelength_sizes[:]) or_return

				n = 0
				c: u16
				for n < ntot {
					c = decode_huffman(z, codelength_ht) or_return
					if c >= 19 { // `c` is unsigned, so only the upper bound needs checking.
						return .Huffman_Bad_Code_Lengths
					}
					if c < 16 {
						lencodes[n] = u8(c)
						n += 1
					} else {
						fill := u8(0)
						compress.refill_lsb(z, 7)
						switch c {
						case 16:
							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3)
							if n == 0 {
								return .Huffman_Bad_Code_Lengths
							}
							fill = lencodes[n - 1]
						case 17:
							c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3)
						case 18:
							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11)
						case:
							return .Huffman_Bad_Code_Lengths
						}
						if ntot - n < u32(c) {
							return .Huffman_Bad_Code_Lengths
						}
						nc := n + u32(c)
						#no_bounds_check for ; n < nc; n += 1 {
							lencodes[n] = fill
						}
					}
				}
				if n != ntot {
					return .Huffman_Bad_Code_Lengths
				}
				build_huffman(z_repeat, lencodes[:hlit])     or_return
				build_huffman(z_offset, lencodes[hlit:ntot]) or_return
			}
			parse_huffman_block(z, z_repeat, z_offset) or_return
		}
		if final == 1 {
			break
		}
	}
	if int(z.bytes_written) != len(z.output.buf) {
		resize(&z.output.buf, int(z.bytes_written)) or_return
	}
	return nil
}
inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	ctx := compress.Context_Memory_Input{}
	ctx.input_data = input
	ctx.output     = buf
	return inflate_from_context(&ctx, raw=raw, expected_output_size=expected_output_size)
}

inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	ctx := compress.Context_Memory_Input{}
	ctx.input_data = input
	ctx.output     = buf
	return inflate_raw(&ctx, expected_output_size=expected_output_size)
}

inflate :: proc{inflate_from_context, inflate_from_byte_array}
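/*
	Example (a sketch with a hypothetical `deflate_payload`): inflating a raw
	DEFLATE stream, i.e. one without the 2-byte ZLIB header and Adler-32
	trailer, such as the payload of a gzip member that has already been
	unframed. Note that the raw path performs no checksum verification:

		buf: bytes.Buffer
		err := inflate_from_byte_array_raw(deflate_payload, &buf, expected_output_size=1024)
*/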