@@ -13,10 +13,8 @@ import "core:compress"

 import "core:mem"
 import "core:io"
-import "core:bytes"
 import "core:hash"
-
-// when #config(TRACY_ENABLE, false) { import tracy "shared:odin-tracy" }
+import "core:bytes"

 /*
     zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
@@ -31,10 +29,6 @@ import "core:hash"
     `Context.rolling_hash` if not inlining it is still faster.

 */
-INLINE_ADLER :: false;
-
-Context :: compress.Context;
-Code_Buffer :: compress.Code_Buffer;

 Compression_Method :: enum u8 {
     DEFLATE = 8,
@@ -140,70 +134,105 @@ z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
     return;
 }

+
+@(optimization_mode="speed")
+grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
+    /*
+        Getting here at all means that we didn't pass an expected output size,
+        or that the one we passed was too small.
+    */
+
+    /*
+        Double until we reach the maximum allowed.
+    */
+    new_size := min(len(buf) << 1, compress.COMPRESS_OUTPUT_ALLOCATE_MAX);
+    resize(buf, new_size);
+    if len(buf) != new_size {
+        /*
+            Resize failed.
+        */
+        return .Resize_Failed;
+    }
+
+    return nil;
+}
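+/*
+    Editor's note: growth is geometric. From the 512-byte floor set in
+    inflate_raw below, the buffer doubles (512 -> 1 KiB -> 2 KiB -> ...) and is
+    clamped to compress.COMPRESS_OUTPUT_ALLOCATE_MAX, so an underestimated
+    output size costs only O(log n) resizes.
+*/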
+
+/*
+    TODO: Make these return compress.Error.
+*/
+
 @(optimization_mode="speed")
-write_byte :: #force_inline proc(z: ^Context, cb: ^Code_Buffer, c: u8) -> (err: io.Error) #no_bounds_check {
-    when #config(TRACY_ENABLE, false) { tracy.ZoneN("Write Byte"); }
-    c := c;
-    buf := transmute([]u8)mem.Raw_Slice{data=&c, len=1};
-    when INLINE_ADLER { z.rolling_hash = hash.adler32(buf, z.rolling_hash); }
-
-    _, e := z.output->impl_write(buf);
-    if e != .None {
-        return e;
+write_byte :: #force_inline proc(z: ^$C, c: u8) -> (err: io.Error) #no_bounds_check {
+    /*
+        Resize if needed.
+    */
+    if int(z.bytes_written) + 1 >= len(z.output.buf) {
+        e := grow_buffer(&z.output.buf);
+        if e != nil {
+            return .Short_Write;
+        }
     }
-    cb.last[z.bytes_written & cb.window_mask] = c;

+    #no_bounds_check {
+        z.output.buf[z.bytes_written] = c;
+    }
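+    /*
+        Editor's note: `z.bytes_written` indexes the next free slot; growing
+        first keeps the store above in bounds. The output buffer now doubles
+        as the LZ77 window, replacing the old `Code_Buffer.last` ring buffer
+        and its masking on every access.
+    */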
     z.bytes_written += 1;
     return .None;
 }

 @(optimization_mode="speed")
-repl_byte :: proc(z: ^Context, cb: ^Code_Buffer, count: u16, c: u8) -> (err: io.Error) {
-    when #config(TRACY_ENABLE, false) { tracy.ZoneN("Repl Byte"); }
+repl_byte :: proc(z: ^$C, count: u16, c: u8) -> (err: io.Error) #no_bounds_check {
     /*
         TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
         without having to worry about wrapping, so no need for a temp allocation to give to
         the output stream, just give it _that_ slice.
     */
-    buf := make([]u8, count, context.temp_allocator);
-    #no_bounds_check for i in 0..<count {
-        buf[i] = c;
-        cb.last[z.bytes_written & cb.window_mask] = c;
-        z.bytes_written += 1;
+
+    /*
+        Resize if needed.
+    */
+    if int(z.bytes_written) + int(count) >= len(z.output.buf) {
+        e := grow_buffer(&z.output.buf);
+        if e != nil {
+            return .Short_Write;
+        }
     }
-    when INLINE_ADLER { z.rolling_hash = hash.adler32(buf, z.rolling_hash); }

-    _, e := z.output->impl_write(buf);
-    if e != .None {
-        return e;
+    #no_bounds_check {
+        for _ in 0..<count {
+            z.output.buf[z.bytes_written] = c;
+            z.bytes_written += 1;
+        }
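+        /*
+            Editor's note: this loop is the DEFLATE distance == 1 case, a
+            memset-style run of one repeated byte; a single grow check up
+            front suffices because the run length is known in advance.
+        */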
     }
+
     return .None;
 }

 @(optimization_mode="speed")
-repl_bytes :: proc(z: ^Context, cb: ^Code_Buffer, count: u16, distance: u16) -> (err: io.Error) {
-    when #config(TRACY_ENABLE, false) { tracy.ZoneN("Repl Bytes"); }
+repl_bytes :: proc(z: ^$C, count: u16, distance: u16) -> (err: io.Error) {
     /*
         TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
         without having to worry about wrapping, so no need for a temp allocation to give to
         the output stream, just give it _that_ slice.
     */
-    buf := make([]u8, count, context.temp_allocator);

-    offset := z.bytes_written - i64(distance);
-    #no_bounds_check for i in 0..<count {
-        c := cb.last[offset & cb.window_mask];
+    offset := i64(distance);
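+    /*
+        Editor's note: `offset` is now a constant back-distance into the output
+        buffer itself; the old code tracked an absolute position into a separate
+        ring buffer and had to mask every access with `window_mask`.
+    */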

-        cb.last[z.bytes_written & cb.window_mask] = c;
-        buf[i] = c;
-        z.bytes_written += 1; offset += 1;
+    if int(z.bytes_written) + int(count) >= len(z.output.buf) {
+        e := grow_buffer(&z.output.buf);
+        if e != nil {
+            return .Short_Write;
+        }
     }
-    when INLINE_ADLER { z.rolling_hash = hash.adler32(buf, z.rolling_hash); }

-    _, e := z.output->impl_write(buf);
-    if e != .None {
-        return e;
+    #no_bounds_check {
+        for _ in 0..<count {
+            c := z.output.buf[z.bytes_written - offset];
+            z.output.buf[z.bytes_written] = c;
+            z.bytes_written += 1;
+        }
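+        /*
+            Editor's note: the copy above is deliberately byte-by-byte. When
+            distance < count the source and destination ranges overlap, and
+            DEFLATE requires just-copied bytes to feed back into the match,
+            so a plain memcpy would produce wrong output.
+        */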
     }
+
     return .None;
 }

@@ -214,7 +243,6 @@ allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_T

 @(optimization_mode="speed")
 build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
-    when #config(TRACY_ENABLE, false) { tracy.ZoneN("Build Huffman Table"); }
     sizes: [HUFFMAN_MAX_BITS+1]int;
     next_code: [HUFFMAN_MAX_BITS]int;

@@ -273,9 +301,8 @@ build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
 }

 @(optimization_mode="speed")
-decode_huffman_slowpath :: proc(z: ^Context, cb: ^Code_Buffer, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
-    when #config(TRACY_ENABLE, false) { tracy.ZoneN("Decode Huffman Slow"); }
-    code := u16(compress.peek_bits_lsb(z, cb, 16));
+decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
+    code := u16(compress.peek_bits_lsb(z, 16));

     k := int(z_bit_reverse(code, 16));
     s: u8;
@@ -298,43 +325,41 @@ decode_huffman_slowpath :: proc(z: ^Context, cb: ^Code_Buffer, t: ^Huffman_Table
         return 0, E_Deflate.Bad_Huffman_Code;
     }

-    compress.consume_bits_lsb(cb, s);
+    compress.consume_bits_lsb(z, s);

     r = t.value[b];
     return r, nil;
 }

 @(optimization_mode="speed")
-decode_huffman :: proc(z: ^Context, cb: ^Code_Buffer, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
-    when #config(TRACY_ENABLE, false) { tracy.ZoneN("Decode Huffman"); }
-    if cb.num_bits < 16 {
-        if cb.num_bits > 63 {
+decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
+    if z.num_bits < 16 {
+        if z.num_bits > 63 {
             return 0, E_ZLIB.Code_Buffer_Malformed;
         }
-        compress.refill_lsb(z, cb);
-        if cb.num_bits > 63 {
+        compress.refill_lsb(z);
+        if z.num_bits > 63 {
             return 0, E_General.Stream_Too_Short;
         }
     }
-    #no_bounds_check b := t.fast[cb.code_buffer & ZFAST_MASK];
+    #no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK];
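+    /*
+        Editor's note: a fast-table entry packs the symbol in its low 9 bits
+        and the code length above ZFAST_BITS; a zero entry means the code is
+        longer than ZFAST_BITS and we take the slow path below.
+    */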
     if b != 0 {
         s := u8(b >> ZFAST_BITS);
-        compress.consume_bits_lsb(cb, s);
+        compress.consume_bits_lsb(z, s);
         return b & 511, nil;
     }
-    return decode_huffman_slowpath(z, cb, t);
+    return decode_huffman_slowpath(z, t);
 }

 @(optimization_mode="speed")
-parse_huffman_block :: proc(z: ^Context, cb: ^Code_Buffer, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
-    when #config(TRACY_ENABLE, false) { tracy.ZoneN("Parse Huffman Block"); }
+parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
     #no_bounds_check for {
-        value, e := decode_huffman(z, cb, z_repeat);
+        value, e := decode_huffman(z, z_repeat);
         if e != nil {
             return err;
         }
         if value < 256 {
-            e := write_byte(z, cb, u8(value));
+            e := write_byte(z, u8(value));
             if e != .None {
                 return E_General.Output_Too_Short;
             }
@@ -347,17 +372,17 @@ parse_huffman_block :: proc(z: ^Context, cb: ^Code_Buffer, z_repeat, z_offset: ^
             value -= 257;
             length := Z_LENGTH_BASE[value];
             if Z_LENGTH_EXTRA[value] > 0 {
-                length += u16(compress.read_bits_lsb(z, cb, Z_LENGTH_EXTRA[value]));
+                length += u16(compress.read_bits_lsb(z, Z_LENGTH_EXTRA[value]));
             }

-            value, e = decode_huffman(z, cb, z_offset);
+            value, e = decode_huffman(z, z_offset);
             if e != nil {
                 return E_Deflate.Bad_Huffman_Code;
             }

             distance := Z_DIST_BASE[value];
             if Z_DIST_EXTRA[value] > 0 {
-                distance += u16(compress.read_bits_lsb(z, cb, Z_DIST_EXTRA[value]));
+                distance += u16(compress.read_bits_lsb(z, Z_DIST_EXTRA[value]));
             }

             if z.bytes_written < i64(distance) {
@@ -365,7 +390,6 @@ parse_huffman_block :: proc(z: ^Context, cb: ^Code_Buffer, z_repeat, z_offset: ^
                 return E_Deflate.Bad_Distance;
             }

-            offset := i64(z.bytes_written - i64(distance));
             /*
                 These might be sped up with a repl_byte call that copies
                 from the already written output more directly, and that
@@ -378,15 +402,15 @@ parse_huffman_block :: proc(z: ^Context, cb: ^Code_Buffer, z_repeat, z_offset: ^
                     Replicate the last outputted byte, length times.
                 */
                 if length > 0 {
-                    c := cb.last[offset & cb.window_mask];
-                    e := repl_byte(z, cb, length, c);
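+                    // Editor's note: distance == 1 in this branch, so `c` is
+                    // simply the most recently written byte.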
+                    c := z.output.buf[z.bytes_written - i64(distance)];
+                    e := repl_byte(z, length, c);
                     if e != .None {
                         return E_General.Output_Too_Short;
                     }
                 }
             } else {
                 if length > 0 {
-                    e := repl_bytes(z, cb, length, distance);
+                    e := repl_bytes(z, length, distance);
                     if e != .None {
                         return E_General.Output_Too_Short;
                     }
@@ -397,25 +421,17 @@ parse_huffman_block :: proc(z: ^Context, cb: ^Code_Buffer, z_repeat, z_offset: ^
 }

 @(optimization_mode="speed")
-inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := context.allocator) -> (err: Error) #no_bounds_check {
+inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
     /*
-        ctx.input must be an io.Stream backed by an implementation that supports:
-        - read
-        - size
-
-        ctx.output must be an io.Stream backed by an implementation that supports:
-        - write
+        ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.

         raw determines whether the ZLIB header is processed, or we're inflating a raw
         DEFLATE stream.
     */

-    code_buffer := Code_Buffer{};
-    cb := &code_buffer;
-
     if !raw {
-        data_size := io.size(ctx.input);
-        if data_size < 6 {
+        size, size_err := compress.input_size(ctx);
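+        // Editor's note: 6 bytes is the minimum for any ZLIB stream:
+        // 2 header bytes (CMF, FLG) plus the 4-byte Adler-32 trailer.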
+        if size < 6 || size_err != nil {
             return E_General.Stream_Too_Short;
         }

@@ -430,8 +446,6 @@ inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := cont
         if cinfo > 7 {
             return E_ZLIB.Unsupported_Window_Size;
         }
-        cb.window_mask = i64((1 << (cinfo + 8) - 1));
-
         flg, _ := compress.read_u8(ctx);

         fcheck := flg & 0x1f;
@@ -456,40 +470,59 @@ inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := cont
             at the end to compare checksums.
         */

-        // Seed the Adler32 rolling checksum.
-        ctx.rolling_hash = 1;
     }

     // Parse ZLIB stream without header.
-    err = inflate_raw(ctx, cb);
+    err = inflate_raw(z=ctx, expected_output_size=expected_output_size);
     if err != nil {
         return err;
     }

     if !raw {
-        compress.discard_to_next_byte_lsb(cb);
-        adler32 := compress.read_bits_lsb(ctx, cb, 8) << 24 | compress.read_bits_lsb(ctx, cb, 8) << 16 | compress.read_bits_lsb(ctx, cb, 8) << 8 | compress.read_bits_lsb(ctx, cb, 8);
+        compress.discard_to_next_byte_lsb(ctx);

-        when !INLINE_ADLER {
-            buf := (^bytes.Buffer)(ctx.output.stream_data).buf[:];
-            ctx.rolling_hash = hash.adler32(buf);
+        adler_b: [4]u8;
+        for _, i in adler_b {
+            adler_b[i], _ = compress.read_u8_prefer_code_buffer_lsb(ctx);
         }
+        adler := transmute(u32be)adler_b;
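+        /*
+            Editor's note: ZLIB stores the Adler-32 checksum big-endian, hence
+            the transmute through u32be. One hash.adler32 call over the finished
+            output replaces the old per-write rolling hash.
+        */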

-        if ctx.rolling_hash != u32(adler32) {
+        output_hash := hash.adler32(ctx.output.buf[:]);
+
+        if output_hash != u32(adler) {
             return E_General.Checksum_Failed;
         }
     }
     return nil;
 }

+// TODO: Check alignment of reserve/resize.
+
 @(optimization_mode="speed")
-inflate_from_stream_raw :: proc(z: ^Context, cb: ^Code_Buffer, allocator := context.allocator) -> (err: Error) #no_bounds_check {
-    when #config(TRACY_ENABLE, false) { tracy.ZoneN("Inflate Raw"); }
-    final := u32(0);
-    type := u32(0);
+inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
+    expected_output_size := expected_output_size;

-    cb.num_bits = 0;
-    cb.code_buffer = 0;
+    /*
+        Always set up a minimum allocation size.
+    */
+    expected_output_size = max(max(expected_output_size, compress.COMPRESS_OUTPUT_ALLOCATE_MIN), 512);
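+    // Editor's note: the 512-byte floor keeps a missing or tiny size hint
+    // from triggering a cascade of grow_buffer doublings on the first writes.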
+
+    // fmt.printf("\nZLIB: Expected Payload Size: %v\n\n", expected_output_size);
+
+    if expected_output_size > 0 && expected_output_size <= compress.COMPRESS_OUTPUT_ALLOCATE_MAX {
+        /*
+            Try to pre-allocate the output buffer.
+        */
+        reserve(&z.output.buf, expected_output_size);
+        resize (&z.output.buf, expected_output_size);
+    };
+
+    if len(z.output.buf) != expected_output_size {
+        return .Resize_Failed;
+    }
+
+    z.num_bits = 0;
+    z.code_buffer = 0;

     z_repeat: ^Huffman_Table;
     z_offset: ^Huffman_Table;
@@ -511,30 +544,24 @@ inflate_from_stream_raw :: proc(z: ^Context, cb: ^Code_Buffer, allocator := cont
     defer free(z_offset);
     defer free(codelength_ht);

-    if cb.window_mask == 0 {
-        cb.window_mask = DEFLATE_MAX_DISTANCE - 1;
-    }
-
-    // Allocate rolling window buffer.
-    cb.last = mem.make_dynamic_array_len_cap([dynamic]u8, cb.window_mask + 1, cb.window_mask + 1, allocator);
-    defer delete(cb.last);
+    final := u32(0);
+    type := u32(0);

     for {
-        final = compress.read_bits_lsb(z, cb, 1);
-        type = compress.read_bits_lsb(z, cb, 2);
+        final = compress.read_bits_lsb(z, 1);
+        type = compress.read_bits_lsb(z, 2);

         // fmt.printf("Final: %v | Type: %v\n", final, type);

         switch type {
         case 0:
-            when #config(TRACY_ENABLE, false) { tracy.ZoneN("Literal Block"); }
             // Uncompressed block

             // Discard bits until next byte boundary
-            compress.discard_to_next_byte_lsb(cb);
+            compress.discard_to_next_byte_lsb(z);

-            uncompressed_len := i16(compress.read_bits_lsb(z, cb, 16));
-            length_check := i16(compress.read_bits_lsb(z, cb, 16));
+            uncompressed_len := i16(compress.read_bits_lsb(z, 16));
+            length_check := i16(compress.read_bits_lsb(z, 16));

             // fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check);

@@ -548,15 +575,14 @@ inflate_from_stream_raw :: proc(z: ^Context, cb: ^Code_Buffer, allocator := cont
                 and a single Adler32 update after.
             */
             #no_bounds_check for uncompressed_len > 0 {
-                compress.refill_lsb(z, cb);
-                lit := compress.read_bits_lsb(z, cb, 8);
-                write_byte(z, cb, u8(lit));
+                compress.refill_lsb(z);
+                lit := compress.read_bits_lsb(z, 8);
+                write_byte(z, u8(lit));
                 uncompressed_len -= 1;
             }
         case 3:
             return E_Deflate.BType_3;
         case:
-            when #config(TRACY_ENABLE, false) { tracy.ZoneN("Huffman Block"); }
             // log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
             if type == 1 {
                 // Use fixed code lengths.
@@ -575,14 +601,14 @@ inflate_from_stream_raw :: proc(z: ^Context, cb: ^Code_Buffer, allocator := cont
                 //i: u32;
                 n: u32;

-                compress.refill_lsb(z, cb, 14);
-                hlit := compress.read_bits_no_refill_lsb(z, cb, 5) + 257;
-                hdist := compress.read_bits_no_refill_lsb(z, cb, 5) + 1;
-                hclen := compress.read_bits_no_refill_lsb(z, cb, 4) + 4;
+                compress.refill_lsb(z, 14);
+                hlit := compress.read_bits_no_refill_lsb(z, 5) + 257;
+                hdist := compress.read_bits_no_refill_lsb(z, 5) + 1;
+                hclen := compress.read_bits_no_refill_lsb(z, 4) + 4;
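+                // Editor's note: one 14-bit refill covers all three fields:
+                // HLIT (5 bits) + HDIST (5 bits) + HCLEN (4 bits).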
                 ntot := hlit + hdist;

                 #no_bounds_check for i in 0..<hclen {
-                    s := compress.read_bits_lsb(z, cb, 3);
+                    s := compress.read_bits_lsb(z, 3);
                     codelength_sizes[Z_LENGTH_DEZIGZAG[i]] = u8(s);
                 }
                 err = build_huffman(codelength_ht, codelength_sizes[:]);
@@ -594,7 +620,7 @@ inflate_from_stream_raw :: proc(z: ^Context, cb: ^Code_Buffer, allocator := cont
                 c: u16;

                 for n < ntot {
-                    c, err = decode_huffman(z, cb, codelength_ht);
+                    c, err = decode_huffman(z, codelength_ht);
                     if err != nil {
                         return err;
                     }
@@ -607,18 +633,18 @@ inflate_from_stream_raw :: proc(z: ^Context, cb: ^Code_Buffer, allocator := cont
                         n += 1;
                     } else {
                         fill := u8(0);
-                        compress.refill_lsb(z, cb, 7);
+                        compress.refill_lsb(z, 7);
                         switch c {
                         case 16:
-                            c = u16(compress.read_bits_no_refill_lsb(z, cb, 2) + 3);
+                            c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3);
                             if n == 0 {
                                 return E_Deflate.Huffman_Bad_Code_Lengths;
                             }
                             fill = lencodes[n - 1];
                         case 17:
-                            c = u16(compress.read_bits_no_refill_lsb(z, cb, 3) + 3);
+                            c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3);
                         case 18:
-                            c = u16(compress.read_bits_no_refill_lsb(z, cb, 7) + 11);
+                            c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11);
                         case:
                             return E_Deflate.Huffman_Bad_Code_Lengths;
                         }
@@ -648,7 +674,7 @@ inflate_from_stream_raw :: proc(z: ^Context, cb: ^Code_Buffer, allocator := cont
                     return err;
                 }
             }
-            err = parse_huffman_block(z, cb, z_repeat, z_offset);
+            err = parse_huffman_block(z, z_repeat, z_offset);
             // log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
             if err != nil {
                 return err;
@@ -659,44 +685,31 @@ inflate_from_stream_raw :: proc(z: ^Context, cb: ^Code_Buffer, allocator := cont
         }
     }

+    if int(z.bytes_written) != len(z.output.buf) {
+        resize(&z.output.buf, int(z.bytes_written));
+    }
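+    /*
+        Editor's note: this shrinks the buffer to the bytes actually written,
+        since the pre-allocation above may have overshot the real size.
+    */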
+
     return nil;
 }

-inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
-    ctx := Context{};
+inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
+    ctx := compress.Context_Memory_Input{};

-    r := bytes.Reader{};
-    bytes.reader_init(&r, input);
-    rs := bytes.reader_to_stream(&r);
-    ctx.input = rs;
     ctx.input_data = input;
-    ctx.input_fully_in_memory = true;
-
-    buf := buf;
-    ws := bytes.buffer_to_stream(buf);
-    ctx.output = ws;
+    ctx.output = buf;

-    err = inflate_from_stream(&ctx, raw);
+    err = inflate_from_context(ctx=&ctx, raw=raw, expected_output_size=expected_output_size);

     return err;
 }

-inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, cb: ^Code_Buffer, raw := false) -> (err: Error) {
-    ctx := Context{};
+inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
+    ctx := compress.Context_Memory_Input{};

-    r := bytes.Reader{};
-    bytes.reader_init(&r, input);
-    rs := bytes.reader_to_stream(&r);
-    ctx.input = rs;
     ctx.input_data = input;
-    ctx.input_fully_in_memory = true;
-
-    buf := buf;
-    ws := bytes.buffer_to_stream(buf);
-    ctx.output = ws;
+    ctx.output = buf;

-    return inflate_from_stream_raw(&ctx, cb);
+    return inflate_raw(z=&ctx, expected_output_size=expected_output_size);
 }

-inflate :: proc{inflate_from_stream, inflate_from_byte_array};
-inflate_raw :: proc{inflate_from_stream_raw, inflate_from_byte_array_raw};
+inflate :: proc{inflate_from_context, inflate_from_byte_array};
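+
+/*
+    Editor's note, usage sketch (assumes `data: []u8` holds a complete ZLIB
+    stream, and imports of "core:bytes" and "core:compress/zlib"):
+
+        buf: bytes.Buffer;
+        defer bytes.buffer_destroy(&buf);
+
+        if err := zlib.inflate(data, &buf); err != nil {
+            // handle error
+        }
+        decompressed := bytes.buffer_to_bytes(&buf);
+*/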