zlib.odin
package zlib

/*
	Copyright 2021 Jeroen van Rijn <[email protected]>.
	Made available under Odin's BSD-2 license.

	List of contributors:
		Jeroen van Rijn: Initial implementation, optimization.
		Ginger Bill:     Cosmetic changes.
*/

import "core:compress"
import "core:mem"
import "core:io"
import "core:hash"
import "core:bytes"

/*
	zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
	Returns: Error.
*/
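/*
	Example usage (an illustrative sketch, not part of the package itself;
	the input bytes are the ZLIB stream for the string "test"):

		import "core:bytes"
		import "core:compress/zlib"

		main :: proc() {
			// 0x78 0x9C header, fixed-Huffman DEFLATE data, Adler-32 trailer.
			compressed := []u8{0x78, 0x9c, 0x2b, 0x49, 0x2d, 0x2e, 0x01, 0x00, 0x04, 0x5d, 0x01, 0xc1};

			buf: bytes.Buffer;
			defer bytes.buffer_destroy(&buf);

			if err := zlib.inflate(compressed, &buf); err != nil {
				return;
			}
			// bytes.buffer_to_string(&buf) == "test"
		}
*/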
/*
	Do we do Adler32 as we write bytes to output?
	It used to be faster to do it inline; now it's faster to do it at the end of `inflate`.
	We'll see what's faster after more optimization, and might end up removing
	`Context.rolling_hash` if not inlining it stays faster.
*/

Compression_Method :: enum u8 {
	DEFLATE  = 8,
	Reserved = 15,
}

Compression_Level :: enum u8 {
	Fastest = 0,
	Fast    = 1,
	Default = 2,
	Maximum = 3,
}

Options :: struct {
	window_size: u16,
	level:       u8,
}

Error     :: compress.Error;
E_General :: compress.General_Error;
E_ZLIB    :: compress.ZLIB_Error;
E_Deflate :: compress.Deflate_Error;

DEFLATE_MAX_CHUNK_SIZE   :: 65535;
DEFLATE_MAX_LITERAL_SIZE :: 65535;
DEFLATE_MAX_DISTANCE     :: 32768;
DEFLATE_MAX_LENGTH       :: 258;

HUFFMAN_MAX_BITS  :: 16;
HUFFMAN_FAST_BITS :: 9;
HUFFMAN_FAST_MASK :: ((1 << HUFFMAN_FAST_BITS) - 1);

Z_LENGTH_BASE := [31]u16{
	3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,
	67,83,99,115,131,163,195,227,258,0,0,
};

Z_LENGTH_EXTRA := [31]u8{
	0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,
};

Z_DIST_BASE := [32]u16{
	1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
	257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0,
};

Z_DIST_EXTRA := [32]u8{
	0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0,
};

Z_LENGTH_DEZIGZAG := []u8{
	16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
};

Z_FIXED_LENGTH := [288]u8{
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
};

Z_FIXED_DIST := [32]u8{
	5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
};

/*
	Accelerate all cases in default tables.
*/
ZFAST_BITS :: 9;
ZFAST_MASK :: ((1 << ZFAST_BITS) - 1);

/*
	ZLIB-style Huffman encoding.
	JPEG packs from left, ZLIB from right. We can't share code.
*/
Huffman_Table :: struct {
	fast:        [1 << ZFAST_BITS]u16,
	firstcode:   [16]u16,
	maxcode:     [17]int,
	firstsymbol: [16]u16,
	size:        [288]u8,
	value:       [288]u16,
};
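/*
	A note on `fast`: each entry packs (code_length << ZFAST_BITS) | symbol and is
	indexed by the next ZFAST_BITS bits of the stream. DEFLATE transmits Huffman
	codes most-significant-bit first while our bit reader is LSB-first, so codes
	are bit-reversed when the table is built. A short code of length `l` fills
	every index whose low `l` bits match it, so a single lookup resolves it.
*/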
// Implementation starts here.

@(optimization_mode="speed")
z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
	assert(bits <= 16);
	// NOTE: Can optimize with llvm.bitreverse.i64 or some bit twiddling
	// by reversing all of the bits and masking out the unneeded ones.
	r = n;
	r = ((r & 0xAAAA) >> 1) | ((r & 0x5555) << 1);
	r = ((r & 0xCCCC) >> 2) | ((r & 0x3333) << 2);
	r = ((r & 0xF0F0) >> 4) | ((r & 0x0F0F) << 4);
	r = ((r & 0xFF00) >> 8) | ((r & 0x00FF) << 8);
	r >>= (16 - bits);
	return;
}
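/*
	Worked example: z_bit_reverse(0b110, 3) reverses all 16 bits
	(0x0006 -> 0x6000), then shifts right by 16 - 3 = 13, yielding 0b011.
	DEFLATE transmits Huffman codes MSB-first, so reversing them lets the
	LSB-first bit reader index lookup tables directly.
*/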
@(optimization_mode="speed")
grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
	/*
		That we get here at all means that we didn't pass an expected output size,
		or that it was too little.
	*/

	/*
		Double until we reach the maximum allowed.
	*/
	new_size := min(len(buf) << 1, compress.COMPRESS_OUTPUT_ALLOCATE_MAX);
	resize(buf, new_size);
	if len(buf) != new_size {
		/*
			Resize failed.
		*/
		return .Resize_Failed;
	}
	return nil;
}
/*
	TODO: Make these return compress.Error.
*/
@(optimization_mode="speed")
write_byte :: #force_inline proc(z: ^$C, c: u8) -> (err: io.Error) #no_bounds_check {
	/*
		Resize if needed.
	*/
	if int(z.bytes_written) + 1 >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf);
		if e != nil {
			return .Short_Write;
		}
	}
	#no_bounds_check {
		z.output.buf[z.bytes_written] = c;
	}
	z.bytes_written += 1;
	return .None;
}
@(optimization_mode="speed")
repl_byte :: proc(z: ^$C, count: u16, c: u8) -> (err: io.Error) #no_bounds_check {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/

	/*
		Resize if needed.
	*/
	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf);
		if e != nil {
			return .Short_Write;
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			z.output.buf[z.bytes_written] = c;
			z.bytes_written += 1;
		}
	}
	return .None;
}

@(optimization_mode="speed")
repl_bytes :: proc(z: ^$C, count: u16, distance: u16) -> (err: io.Error) {
	/*
		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
		without having to worry about wrapping, so no need for a temp allocation to give to
		the output stream, just give it _that_ slice.
	*/
	offset := i64(distance);

	if int(z.bytes_written) + int(count) >= len(z.output.buf) {
		e := grow_buffer(&z.output.buf);
		if e != nil {
			return .Short_Write;
		}
	}
	#no_bounds_check {
		for _ in 0..<count {
			c := z.output.buf[z.bytes_written - offset];
			z.output.buf[z.bytes_written] = c;
			z.bytes_written += 1;
		}
	}
	return .None;
}
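/*
	Because `repl_bytes` copies byte by byte, overlapping matches behave as LZ77
	requires: with "ab" already written, a match of length 4 at distance 2
	appends "abab", yielding "ababab".
*/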
allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_Table, err: Error) {
	return new(Huffman_Table, allocator), nil;
}

@(optimization_mode="speed")
build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
	sizes:     [HUFFMAN_MAX_BITS+1]int;
	next_code: [HUFFMAN_MAX_BITS]int;

	k := int(0);

	mem.zero_slice(sizes[:]);
	mem.zero_slice(z.fast[:]);

	for v in code_lengths {
		sizes[v] += 1;
	}
	sizes[0] = 0;

	for i in 1..<(HUFFMAN_MAX_BITS+1) {
		if sizes[i] > (1 << uint(i)) {
			return E_Deflate.Huffman_Bad_Sizes;
		}
	}

	code := int(0);
	for i in 1..<HUFFMAN_MAX_BITS {
		next_code[i]     = code;
		z.firstcode[i]   = u16(code);
		z.firstsymbol[i] = u16(k);
		code = code + sizes[i];
		if sizes[i] != 0 {
			if code - 1 >= (1 << u16(i)) {
				return E_Deflate.Huffman_Bad_Code_Lengths;
			}
		}
		z.maxcode[i] = code << (HUFFMAN_MAX_BITS - uint(i));
		code <<= 1;
		k += int(sizes[i]);
	}

	z.maxcode[HUFFMAN_MAX_BITS] = 0x10000; // Sentinel
	c: int;
	for v, ci in code_lengths {
		if v != 0 {
			c = next_code[v] - int(z.firstcode[v]) + int(z.firstsymbol[v]);
			fastv := u16((u16(v) << 9) | u16(ci));
			z.size[c]  = u8(v);
			z.value[c] = u16(ci);
			if v <= ZFAST_BITS {
				j := z_bit_reverse(u16(next_code[v]), v);
				for j < (1 << ZFAST_BITS) {
					z.fast[j] = fastv;
					j += (1 << v);
				}
			}
			next_code[v] += 1;
		}
	}
	return nil;
}
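/*
	Worked example of the canonical code assignment above: for code lengths
	{A: 2, B: 1, C: 3, D: 3} we get B = 0, A = 10, C = 110, D = 111.
	Shorter codes sort first, and codes of equal length are assigned in symbol
	order, which is why only the lengths need to be stored in the stream.
*/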
@(optimization_mode="speed")
decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	code := u16(compress.peek_bits_lsb(z, 16));

	k := int(z_bit_reverse(code, 16));

	s: u8;
	#no_bounds_check for s = HUFFMAN_FAST_BITS+1; ; {
		if k < t.maxcode[s] {
			break;
		}
		s += 1;
	}
	if s >= 16 {
		return 0, E_Deflate.Bad_Huffman_Code;
	}

	// Code size is s, so:
	b := (k >> (16 - s)) - int(t.firstcode[s]) + int(t.firstsymbol[s]);
	if b >= size_of(t.size) {
		return 0, E_Deflate.Bad_Huffman_Code;
	}
	if t.size[b] != s {
		return 0, E_Deflate.Bad_Huffman_Code;
	}

	compress.consume_bits_lsb(z, s);
	r = t.value[b];
	return r, nil;
}

@(optimization_mode="speed")
decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
	if z.num_bits < 16 {
		if z.num_bits > 63 {
			return 0, E_ZLIB.Code_Buffer_Malformed;
		}
		compress.refill_lsb(z);
		if z.num_bits > 63 {
			return 0, E_General.Stream_Too_Short;
		}
	}
	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK];
	if b != 0 {
		s := u8(b >> ZFAST_BITS);
		compress.consume_bits_lsb(z, s);
		return b & 511, nil;
	}
	return decode_huffman_slowpath(z, t);
}
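/*
	The fast path resolves any code of at most ZFAST_BITS bits with a single
	table lookup: the entry's high bits hold the code length to consume, and the
	low 9 bits (`b & 511`) hold the symbol. Longer codes take the slow path,
	which scans `maxcode` to find the code length first.
*/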
@(optimization_mode="speed")
parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
	#no_bounds_check for {
		value, e := decode_huffman(z, z_repeat);
		if e != nil {
			return e;
		}
		if value < 256 {
			e := write_byte(z, u8(value));
			if e != .None {
				return E_General.Output_Too_Short;
			}
		} else {
			if value == 256 {
				// End of block
				return nil;
			}

			value -= 257;
			length := Z_LENGTH_BASE[value];
			if Z_LENGTH_EXTRA[value] > 0 {
				length += u16(compress.read_bits_lsb(z, Z_LENGTH_EXTRA[value]));
			}

			value, e = decode_huffman(z, z_offset);
			if e != nil {
				return E_Deflate.Bad_Huffman_Code;
			}

			distance := Z_DIST_BASE[value];
			if Z_DIST_EXTRA[value] > 0 {
				distance += u16(compress.read_bits_lsb(z, Z_DIST_EXTRA[value]));
			}

			if z.bytes_written < i64(distance) {
				// Distance is longer than we've decoded so far.
				return E_Deflate.Bad_Distance;
			}

			/*
				These might be sped up with a repl_byte call that copies
				from the already written output more directly, and that
				updates the Adler checksum once afterwards.
				That way we'd suffer less Stream vtable overhead.
			*/
			if distance == 1 {
				/*
					Replicate the last outputted byte, length times.
				*/
				if length > 0 {
					c := z.output.buf[z.bytes_written - i64(distance)];
					e := repl_byte(z, length, c);
					if e != .None {
						return E_General.Output_Too_Short;
					}
				}
			} else {
				if length > 0 {
					e := repl_bytes(z, length, distance);
					if e != .None {
						return E_General.Output_Too_Short;
					}
				}
			}
		}
	}
}
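/*
	Worked example of the length/distance decode above: a literal/length symbol
	of 265 gives value = 265 - 257 = 8, so length = Z_LENGTH_BASE[8] = 11 plus
	Z_LENGTH_EXTRA[8] = 1 extra bit, i.e. 11..12. A following distance symbol of
	4 gives distance = Z_DIST_BASE[4] = 5 plus 1 extra bit, i.e. 5..6 bytes back.
*/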
@(optimization_mode="speed")
inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	/*
		ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.

		`raw` determines whether the ZLIB header is processed, or whether we're inflating a raw
		DEFLATE stream.
	*/
	if !raw {
		size, size_err := compress.input_size(ctx);
		if size < 6 || size_err != nil {
			return E_General.Stream_Too_Short;
		}

		cmf, _ := compress.read_u8(ctx);

		method := Compression_Method(cmf & 0xf);
		if method != .DEFLATE {
			return E_General.Unknown_Compression_Method;
		}

		cinfo := (cmf >> 4) & 0xf;
		if cinfo > 7 {
			return E_ZLIB.Unsupported_Window_Size;
		}

		flg, _ := compress.read_u8(ctx);

		/*
			RFC 1950: FCHECK is chosen so that CMF*256 + FLG is a multiple of 31.
		*/
		if (u16(cmf) << 8 | u16(flg)) % 31 != 0 {
			return E_General.Checksum_Failed;
		}

		fdict := (flg >> 5) & 1;
		/*
			We don't handle built-in dictionaries for now.
			They're application specific and PNG doesn't use them.
		*/
		if fdict != 0 {
			return E_ZLIB.FDICT_Unsupported;
		}

		// flevel := Compression_Level((flg >> 6) & 3);
		/*
			Inflate can consume bits belonging to the Adler checksum.
			We pass the entire stream to Inflate and will unget bytes if we need to
			at the end to compare checksums.
		*/
	}

	// Parse ZLIB stream without header.
	err = inflate_raw(z=ctx, expected_output_size=expected_output_size);
	if err != nil {
		return err;
	}

	if !raw {
		compress.discard_to_next_byte_lsb(ctx);

		adler_b: [4]u8;
		for _, i in adler_b {
			adler_b[i], _ = compress.read_u8_prefer_code_buffer_lsb(ctx);
		}
		adler := transmute(u32be)adler_b;

		output_hash := hash.adler32(ctx.output.buf[:]);
		if output_hash != u32(adler) {
			return E_General.Checksum_Failed;
		}
	}
	return nil;
}
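/*
	Worked example of the header checks above: the common prefix 0x78 0x9C means
	CMF = 0x78 (method 8 = DEFLATE, cinfo 7 = 32 KiB window) and FLG = 0x9C,
	chosen so that 0x789C = 30876 is a multiple of 31 (FCHECK), with FDICT clear
	and FLEVEL = 2 (default).
*/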
// TODO: Check alignment of reserve/resize.
@(optimization_mode="speed")
inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
	expected_output_size := expected_output_size;
	/*
		Always set up a minimum allocation size.
	*/
	expected_output_size = max(max(expected_output_size, compress.COMPRESS_OUTPUT_ALLOCATE_MIN), 512);

	// fmt.printf("\nZLIB: Expected Payload Size: %v\n\n", expected_output_size);

	if expected_output_size > 0 && expected_output_size <= compress.COMPRESS_OUTPUT_ALLOCATE_MAX {
		/*
			Try to pre-allocate the output buffer.
		*/
		reserve(&z.output.buf, expected_output_size);
		resize (&z.output.buf, expected_output_size);
	}
	if len(z.output.buf) != expected_output_size {
		return .Resize_Failed;
	}

	z.num_bits    = 0;
	z.code_buffer = 0;

	z_repeat:      ^Huffman_Table;
	z_offset:      ^Huffman_Table;
	codelength_ht: ^Huffman_Table;

	z_repeat, err = allocate_huffman_table(allocator=context.allocator);
	if err != nil {
		return err;
	}
	z_offset, err = allocate_huffman_table(allocator=context.allocator);
	if err != nil {
		return err;
	}
	codelength_ht, err = allocate_huffman_table(allocator=context.allocator);
	if err != nil {
		return err;
	}
	defer free(z_repeat);
	defer free(z_offset);
	defer free(codelength_ht);

	final := u32(0);
	type  := u32(0);

	for {
		final = compress.read_bits_lsb(z, 1);
		type  = compress.read_bits_lsb(z, 2);

		// fmt.printf("Final: %v | Type: %v\n", final, type);

		switch type {
		case 0:
			// Uncompressed block

			// Discard bits until next byte boundary.
			compress.discard_to_next_byte_lsb(z);

			/*
				LEN can be up to 65535, so read it as u16; NLEN must be its one's complement.
			*/
			uncompressed_len := u16(compress.read_bits_lsb(z, 16));
			length_check     := u16(compress.read_bits_lsb(z, 16));

			// fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check);

			if ~uncompressed_len != length_check {
				return E_Deflate.Len_Nlen_Mismatch;
			}

			/*
				TODO: Maybe speed this up with a stream-to-stream copy (read_from)
				and a single Adler32 update after.
			*/
			#no_bounds_check for uncompressed_len > 0 {
				compress.refill_lsb(z);
				lit := compress.read_bits_lsb(z, 8);
				write_byte(z, u8(lit));
				uncompressed_len -= 1;
			}
		case 3:
			return E_Deflate.BType_3;
		case:
			// log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
			if type == 1 {
				// Use fixed code lengths.
				err = build_huffman(z_repeat, Z_FIXED_LENGTH[:]);
				if err != nil {
					return err;
				}
				err = build_huffman(z_offset, Z_FIXED_DIST[:]);
				if err != nil {
					return err;
				}
			} else {
				lencodes: [286+32+137]u8;
				codelength_sizes: [19]u8;

				n: u32;

				compress.refill_lsb(z, 14);
				hlit  := compress.read_bits_no_refill_lsb(z, 5) + 257;
				hdist := compress.read_bits_no_refill_lsb(z, 5) + 1;
				hclen := compress.read_bits_no_refill_lsb(z, 4) + 4;
				ntot  := hlit + hdist;
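				/*
					Worked example: with a 5-bit HLIT field of 29 and HDIST field of 29,
					hlit = 286 literal/length codes and hdist = 30 distance codes, so
					ntot = 316 code lengths follow, themselves compressed with the
					code-length table we read next.
				*/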
				#no_bounds_check for i in 0..<hclen {
					s := compress.read_bits_lsb(z, 3);
					codelength_sizes[Z_LENGTH_DEZIGZAG[i]] = u8(s);
				}
				err = build_huffman(codelength_ht, codelength_sizes[:]);
				if err != nil {
					return err;
				}

				n = 0;
				c: u16;
				for n < ntot {
					c, err = decode_huffman(z, codelength_ht);
					if err != nil {
						return err;
					}
					if c >= 19 {
						return E_Deflate.Huffman_Bad_Code_Lengths;
					}
					if c < 16 {
						lencodes[n] = u8(c);
						n += 1;
					} else {
						fill := u8(0);
						compress.refill_lsb(z, 7);
						switch c {
						case 16:
							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3);
							if n == 0 {
								return E_Deflate.Huffman_Bad_Code_Lengths;
							}
							fill = lencodes[n - 1];
						case 17:
							c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3);
						case 18:
							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11);
						case:
							return E_Deflate.Huffman_Bad_Code_Lengths;
						}

						if ntot - n < u32(c) {
							return E_Deflate.Huffman_Bad_Code_Lengths;
						}

						nc := n + u32(c);
						#no_bounds_check for ; n < nc; n += 1 {
							lencodes[n] = fill;
						}
					}
				}
				if n != ntot {
					return E_Deflate.Huffman_Bad_Code_Lengths;
				}
				err = build_huffman(z_repeat, lencodes[:hlit]);
				if err != nil {
					return err;
				}
				err = build_huffman(z_offset, lencodes[hlit:ntot]);
				if err != nil {
					return err;
				}
			}
			err = parse_huffman_block(z, z_repeat, z_offset);
			// log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
			if err != nil {
				return err;
			}
		}
		if final == 1 {
			break;
		}
	}

	if int(z.bytes_written) != len(z.output.buf) {
		resize(&z.output.buf, int(z.bytes_written));
	}
	return nil;
}
inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	ctx := compress.Context_Memory_Input{};

	ctx.input_data = input;
	ctx.output     = buf;

	err = inflate_from_context(ctx=&ctx, raw=raw, expected_output_size=expected_output_size);
	return err;
}

inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
	ctx := compress.Context_Memory_Input{};

	ctx.input_data = input;
	ctx.output     = buf;

	return inflate_raw(z=&ctx, expected_output_size=expected_output_size);
}

inflate :: proc{inflate_from_context, inflate_from_byte_array};