
PNG: Let PNG use the new compress I/O routines.

Jeroen van Rijn · 4 years ago
commit eaf88bcc4d
4 changed files with 32 additions and 342 deletions
  1. core/compress/common.odin (+14 -6)
  2. core/compress/gzip/gzip.odin (+7 -244)
  3. core/compress/zlib/zlib.odin (+5 -80)
  4. core/image/png/png.odin (+6 -12)
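
The commit makes the gzip, zlib, and PNG loaders generic over the compress package's two input contexts, so one implementation serves both slice- and stream-backed input. A simplified sketch of their shapes, with field sets abbreviated (see core/compress/common.odin for the real definitions):

	// Input held fully in memory as a slice.
	Context_Memory_Input :: struct {
		input_data: []u8,
		// ... bit-reader state, output buffer, etc.
	}

	// Input read incrementally from an io.Stream.
	Context_Stream_Input :: struct #packed {
		input: io.Stream,
		// ... bit-reader state, output buffer, etc.
	}

Procedures that previously took a concrete ^compress.Context now take ^$C and are instantiated for either context type, with proc groups picking the right concrete specialization.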

core/compress/common.odin (+14 -6)

@@ -182,6 +182,16 @@ Context_Stream_Input :: struct #packed {
 
 // TODO: Make these return compress.Error errors.
 
+input_size_from_memory :: proc(z: ^Context_Memory_Input) -> (res: i64, err: Error) {
+	return i64(len(z.input_data)), nil;
+}
+
+input_size_from_stream :: proc(z: ^Context_Stream_Input) -> (res: i64, err: Error) {
+	return io.size(z.input), nil;
+}
+
+input_size :: proc{input_size_from_memory, input_size_from_stream};
+
 @(optimization_mode="speed")
 read_slice_from_memory :: #force_inline proc(z: ^Context_Memory_Input, size: int) -> (res: []u8, err: io.Error) {
 	#no_bounds_check {
@@ -257,12 +267,10 @@ peek_data_from_memory :: #force_inline proc(z: ^Context_Memory_Input, $T: typeid
 		}
 	}
 
-	if z.input_fully_in_memory {
-		if len(z.input_data) < size {
-			return T{}, .EOF;
-		} else {
-			return T{}, .Short_Buffer;
-		}
+	if len(z.input_data) == 0 {
+		return T{}, .EOF;
+	} else {
+		return T{}, .Short_Buffer;
 	}
 }
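
As a usage sketch, the new input_size proc group lets parapoly code ask how much input a context has without caring whether it is slice- or stream-backed; the helper below is hypothetical:

	package example

	import "core:compress"
	import "core:fmt"

	// Hypothetical caller: works for both Context_Memory_Input and
	// Context_Stream_Input, because the input_size proc group resolves
	// per instantiated type.
	report_input_size :: proc(z: ^$C) {
		size, err := compress.input_size(z);
		if err != nil {
			return;
		}
		fmt.printf("input: %v bytes\n", size);
	}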
 

core/compress/gzip/gzip.odin (+7 -244)

@@ -103,7 +103,7 @@ E_Deflate :: compress.Deflate_Error;
 
 GZIP_MAX_PAYLOAD_SIZE :: int(max(u32le));
 
-load :: proc{load_from_slice, load_from_stream, load_from_file};
+load :: proc{load_from_slice, load_from_file, load_from_context};
 
 load_from_file :: proc(filename: string, buf: ^bytes.Buffer, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
 	data, ok := os.read_entire_file(filename, allocator);
@@ -123,248 +123,10 @@ load_from_slice :: proc(slice: []u8, buf: ^bytes.Buffer, known_gzip_size := -1,
 		input_data = slice,
 		output = buf,
 	};
-
-	expected_output_size := expected_output_size;
-	input_data_consumed := 0;
-	z.output = buf;
-
-	if expected_output_size > GZIP_MAX_PAYLOAD_SIZE {
-		return E_GZIP.Payload_Size_Exceeds_Max_Payload;
-	}
-
-	if expected_output_size > compress.COMPRESS_OUTPUT_ALLOCATE_MAX {
-		return E_GZIP.Output_Exceeds_COMPRESS_OUTPUT_ALLOCATE_MAX;
-	}
-
-	b: []u8;
-
-	header, e := compress.read_data(z, Header);
-	if e != .None {
-		return E_General.File_Too_Short;
-	}
-	input_data_consumed += size_of(Header);
-
-	if header.magic != .GZIP {
-		return E_GZIP.Invalid_GZIP_Signature;
-	}
-	if header.compression_method != .DEFLATE {
-		return E_General.Unknown_Compression_Method;
-	}
-
-	if header.os >= ._Unknown {
-		header.os = .Unknown;
-	}
-
-	if .reserved_1 in header.flags || .reserved_2 in header.flags || .reserved_3 in header.flags {
-		return E_GZIP.Reserved_Flag_Set;
-	}
-
-	// printf("signature: %v\n", header.magic);
-	// printf("compression: %v\n", header.compression_method);
-	// printf("flags: %v\n", header.flags);
-	// printf("modification time: %v\n", time.unix(i64(header.modification_time), 0));
-	// printf("xfl: %v (%v)\n", header.xfl, int(header.xfl));
-	// printf("os: %v\n", OS_Name[header.os]);
-
-	if .extra in header.flags {
-		xlen, e_extra := compress.read_data(z, u16le);
-		input_data_consumed += 2;
-
-		if e_extra != .None {
-			return E_General.Stream_Too_Short;
-		}
-		// printf("Extra data present (%v bytes)\n", xlen);
-		if xlen < 4 {
-			// Minimum length is 2 for ID + 2 for a field length, if set to zero.
-			return E_GZIP.Invalid_Extra_Data;
-		}
-
-		field_id:     [2]u8;
-		field_length: u16le;
-		field_error: io.Error;
-
-		for xlen >= 4 {
-			// println("Parsing Extra field(s).");
-			field_id, field_error = compress.read_data(z, [2]u8);
-			if field_error != .None {
-				// printf("Parsing Extra returned: %v\n", field_error);
-				return E_General.Stream_Too_Short;
-			}
-			xlen -= 2;
-			input_data_consumed += 2;
-
-			field_length, field_error = compress.read_data(z, u16le);
-			if field_error != .None {
-				// printf("Parsing Extra returned: %v\n", field_error);
-				return E_General.Stream_Too_Short;
-			}
-			xlen -= 2;
-			input_data_consumed += 2;
-
-			if xlen <= 0 {
-				// We're not going to try and recover by scanning for a ZLIB header.
-				// Who knows what else is wrong with this file.
-				return E_GZIP.Invalid_Extra_Data;
-			}
-
-			// printf("    Field \"%v\" of length %v found: ", string(field_id[:]), field_length);
-			if field_length > 0 {
-				b, field_error = compress.read_slice(z, int(field_length));
-				if field_error != .None {
-					// printf("Parsing Extra returned: %v\n", field_error);
-					return E_General.Stream_Too_Short;
-				}
-				xlen -= field_length;
-				input_data_consumed += int(field_length);
-
-				// printf("%v\n", string(field_data));
-			}
-
-			if xlen != 0 {
-				return E_GZIP.Invalid_Extra_Data;
-			}
-		}
-	}
-
-	if .name in header.flags {
-		// Should be enough.
-		name: [1024]u8;
-		i := 0;
-		name_error: io.Error;
-
-		for i < len(name) {
-			b, name_error = compress.read_slice(z, 1);
-			if name_error != .None {
-				return E_General.Stream_Too_Short;
-			}
-			input_data_consumed += 1;
-			if b[0] == 0 {
-				break;
-			}
-			name[i] = b[0];
-			i += 1;
-			if i >= len(name) {
-				return E_GZIP.Original_Name_Too_Long;
-			}
-		}
-		// printf("Original filename: %v\n", string(name[:i]));
-	}
-
-	if .comment in header.flags {
-		// Should be enough.
-		comment: [1024]u8;
-		i := 0;
-		comment_error: io.Error;
-
-		for i < len(comment) {
-			b, comment_error = compress.read_slice(z, 1);
-			if comment_error != .None {
-				return E_General.Stream_Too_Short;
-			}
-			input_data_consumed += 1;
-			if b[0] == 0 {
-				break;
-			}
-			comment[i] = b[0];
-			i += 1;
-			if i >= len(comment) {
-				return E_GZIP.Comment_Too_Long;
-			}
-		}
-		// printf("Comment: %v\n", string(comment[:i]));
-	}
-
-	if .header_crc in header.flags {
-		crc_error: io.Error;
-		_, crc_error = compress.read_slice(z, 2);
-		input_data_consumed += 2;
-		if crc_error != .None {
-			return E_General.Stream_Too_Short;
-		}
-		/*
-			We don't actually check the CRC16 (lower 2 bytes of CRC32 of header data until the CRC field).
-			If we find a gzip file in the wild that sets this field, we can add proper support for it.
-		*/
-	}
-
-	/*
-		We should have arrived at the ZLIB payload.
-	*/
-	payload_u32le: u32le;
-
-	// fmt.printf("known_gzip_size: %v | expected_output_size: %v\n", known_gzip_size, expected_output_size);
-
-	if expected_output_size > -1 {
-		/*
-			We already checked that it's not larger than the output buffer max,
-			or GZIP length field's max.
-
-			We'll just pass it on to `zlib.inflate_raw`;
-		*/
-	} else {
-		/*
-			If we know the size of the GZIP file *and* it is fully in memory,
-			then we can peek at the unpacked size at the end.
-
-			We'll still want to ensure there's capacity left in the output buffer when we write, of course.
-
-		*/
-		if known_gzip_size > -1 {
-			offset := known_gzip_size - input_data_consumed - 4;
-			if len(z.input_data) >= offset + 4 {
-				length_bytes         := z.input_data[offset:][:4];
-				payload_u32le         = (^u32le)(&length_bytes[0])^;
-				expected_output_size = int(payload_u32le);
-			}
-		} else {
-			/*
-				TODO(Jeroen): When reading a GZIP from a stream, check if impl_seek is present.
-				If so, we can seek to the end, grab the size from the footer, and seek back to payload start.
-			*/
-		}
-	}
-
-	// fmt.printf("GZIP: Expected Payload Size: %v\n", expected_output_size);
-
-	zlib_error := zlib.inflate_raw(z=z, expected_output_size=expected_output_size);
-	if zlib_error != nil {
-		return zlib_error;
-	}
-	/*
-		Read CRC32 using the ctx bit reader because zlib may leave bytes in there.
-	*/
-	compress.discard_to_next_byte_lsb(z);
-
-	footer_error: io.Error;
-
-	payload_crc_b: [4]u8;
-	for _, i in payload_crc_b {
-		if z.num_bits >= 8 {
-			payload_crc_b[i] = u8(compress.read_bits_lsb(z, 8));
-		} else {
-			payload_crc_b[i], footer_error = compress.read_u8(z);
-		}
-	}
-	payload_crc := transmute(u32le)payload_crc_b;
-	payload_u32le, footer_error = compress.read_data(z, u32le);
-
-	payload := bytes.buffer_to_bytes(buf);
-
-	// fmt.printf("GZIP payload: %v\n", string(payload));
-
-	crc32 := u32le(hash.crc32(payload));
-
-	if crc32 != payload_crc {
-		return E_GZIP.Payload_CRC_Invalid;
-	}
-
-	if len(payload) != int(payload_u32le) {
-		return E_GZIP.Payload_Length_Invalid;
-	}
-	return nil;
+	return load_from_context(z, buf, known_gzip_size, expected_output_size, allocator);
 }
 
-load_from_stream :: proc(z: ^compress.Context_Stream_Input, buf: ^bytes.Buffer, known_gzip_size := -1, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
+load_from_context :: proc(z: ^$C, buf: ^bytes.Buffer, known_gzip_size := -1, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
 	buf := buf;
 	expected_output_size := expected_output_size;
 
@@ -553,9 +315,10 @@ load_from_stream :: proc(z: ^compress.Context_Stream_Input, buf: ^bytes.Buffer,
 			We'll still want to ensure there's capacity left in the output buffer when we write, of course.
 
 		*/
-		if z.input_fully_in_memory && known_gzip_size > -1 {
-			offset := known_gzip_size - input_data_consumed - 4;
-			if len(z.input_data) >= offset + 4 {
+		if known_gzip_size > -1 {
+			offset := i64(known_gzip_size - input_data_consumed - 4);
+			size, _ := compress.input_size(z);
+			if size >= offset + 4 {
 				length_bytes         := z.input_data[offset:][:4];
 				payload_u32le         = (^u32le)(&length_bytes[0])^;
 				expected_output_size = int(payload_u32le);
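
After this refactor, load_from_slice is a thin wrapper that builds a Context_Memory_Input and forwards to the generic load_from_context. A minimal usage sketch, assuming a placeholder path:

	package example

	import "core:bytes"
	import "core:compress/gzip"
	import "core:fmt"

	main :: proc() {
		buf: bytes.Buffer;
		defer bytes.buffer_destroy(&buf);

		// "example.gz" is a placeholder. load resolves to load_from_file,
		// which reads the file and hands the slice to load_from_slice,
		// which in turn forwards to load_from_context.
		if err := gzip.load("example.gz", &buf); err != nil {
			fmt.printf("gzip error: %v\n", err);
			return;
		}
		fmt.println(string(buf.buf[:]));
	}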

core/compress/zlib/zlib.odin (+5 -80)

@@ -423,7 +423,7 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 }
 
 @(optimization_mode="speed")
-__inflate_from_memory :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
+inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
 	/*
 		ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.
 
@@ -432,83 +432,8 @@ __inflate_from_memory :: proc(using ctx: ^compress.Context_Memory_Input, raw :=
 	*/
 
 	if !raw {
-		if len(ctx.input_data) < 6 {
-			return E_General.Stream_Too_Short;
-		}
-
-		cmf, _ := compress.read_u8(ctx);
-
-		method := Compression_Method(cmf & 0xf);
-		if method != .DEFLATE {
-			return E_General.Unknown_Compression_Method;
-		}
-
-		cinfo  := (cmf >> 4) & 0xf;
-		if cinfo > 7 {
-			return E_ZLIB.Unsupported_Window_Size;
-		}
-		flg, _ := compress.read_u8(ctx);
-
-		fcheck  := flg & 0x1f;
-		fcheck_computed := (cmf << 8 | flg) & 0x1f;
-		if fcheck != fcheck_computed {
-			return E_General.Checksum_Failed;
-		}
-
-		fdict   := (flg >> 5) & 1;
-		/*
-			We don't handle built-in dictionaries for now.
-			They're application specific and PNG doesn't use them.
-		*/
-		if fdict != 0 {
-			return E_ZLIB.FDICT_Unsupported;
-		}
-
-		// flevel  := Compression_Level((flg >> 6) & 3);
-		/*
-			Inflate can consume bits belonging to the Adler checksum.
-			We pass the entire stream to Inflate and will unget bytes if we need to
-			at the end to compare checksums.
-		*/
-
-	}
-
-	// Parse ZLIB stream without header.
-	err = inflate_raw(z=ctx, expected_output_size=expected_output_size);
-	if err != nil {
-		return err;
-	}
-
-	if !raw {
-		compress.discard_to_next_byte_lsb(ctx);
-		adler32 := compress.read_bits_lsb(ctx, 8) << 24 | compress.read_bits_lsb(ctx, 8) << 16 | compress.read_bits_lsb(ctx, 8) << 8 | compress.read_bits_lsb(ctx, 8);
-
-		output_hash := hash.adler32(ctx.output.buf[:]);
-
-		if output_hash != u32(adler32) {
-			return E_General.Checksum_Failed;
-		}
-	}
-	return nil;
-}
-
-
-@(optimization_mode="speed")
-__inflate_from_stream :: proc(using ctx: ^$C, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
-	/*
-		ctx.input must be an io.Stream backed by an implementation that supports:
-		- read
-		- size
-
-		ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.
-
-		raw determines whether the ZLIB header is processed, or we're inflating a raw
-		DEFLATE stream.
-	*/
-
-	if !raw {
-		data_size := io.size(ctx.input);
-		if data_size < 6 {
+		size, size_err := compress.input_size(ctx);
+		if size < 6 || size_err != nil {
 			return E_General.Stream_Too_Short;
 		}
 
@@ -773,7 +698,7 @@ inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, e
 	ctx.input_data = input;
 	ctx.output = buf;
 
-	err = __inflate_from_memory(ctx=&ctx, raw=raw, expected_output_size=expected_output_size);
+	err = inflate_from_context(ctx=&ctx, raw=raw, expected_output_size=expected_output_size);
 
 	return err;
 }
@@ -787,4 +712,4 @@ inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := fals
 	return inflate_raw(z=&ctx, expected_output_size=expected_output_size);
 }
 
-inflate     :: proc{__inflate_from_stream, inflate_from_byte_array};
+inflate     :: proc{inflate_from_context, inflate_from_byte_array};
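
A minimal sketch of the slice path through the updated proc group. The eight bytes below are the canonical zlib stream for an empty payload (CMF/FLG header 0x78 0x9C, an empty final fixed-Huffman DEFLATE block, and the Adler-32 of an empty buffer):

	package example

	import "core:bytes"
	import "core:compress/zlib"
	import "core:fmt"

	main :: proc() {
		// 0x78 0x9C: header; 0x03 0x00: empty final block;
		// 0x00000001: Adler-32 of "", stored big-endian.
		data := []u8{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01};

		buf: bytes.Buffer;
		defer bytes.buffer_destroy(&buf);

		// Resolves to inflate_from_byte_array; a caller that already owns
		// a Context_Memory_Input would hit inflate_from_context instead.
		if err := zlib.inflate(data, &buf); err != nil {
			fmt.printf("zlib error: %v\n", err);
			return;
		}
		fmt.printf("inflated %v bytes\n", len(buf.buf));
	}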

core/image/png/png.odin (+6 -12)

@@ -245,7 +245,7 @@ ADAM7_Y_SPACING := []int{ 8,8,8,4,4,2,2 };
 
 // Implementation starts here
 
-read_chunk :: proc(ctx: ^compress.Context) -> (chunk: Chunk, err: Error) {
+read_chunk :: proc(ctx: ^$C) -> (chunk: Chunk, err: Error) {
 	ch, e := compress.read_data(ctx, Chunk_Header);
 	if e != .None {
 		return {}, E_General.Stream_Too_Short;
@@ -274,7 +274,7 @@ read_chunk :: proc(ctx: ^compress.Context) -> (chunk: Chunk, err: Error) {
 	return chunk, nil;
 }
 
-read_header :: proc(ctx: ^compress.Context) -> (IHDR, Error) {
+read_header :: proc(ctx: ^$C) -> (IHDR, Error) {
 	c, e := read_chunk(ctx);
 	if e != nil {
 		return {}, e;
@@ -353,14 +353,8 @@ chunk_type_to_name :: proc(type: ^Chunk_Type) -> string {
 }
 
 load_from_slice :: proc(slice: []u8, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
-	r := bytes.Reader{};
-	bytes.reader_init(&r, slice);
-	stream := bytes.reader_to_stream(&r);
-
-	ctx := &compress.Context{
-		input = stream,
+	ctx := &compress.Context_Memory_Input{
 		input_data = slice,
-		input_fully_in_memory = true,
 	};
 
 	/*
@@ -368,7 +362,7 @@ load_from_slice :: proc(slice: []u8, options := Options{}, allocator := context.
 		This way the stream reader could avoid the copy into the temp memory returned by it,
 		and instead return a slice into the original memory that's already owned by the caller.
 	*/
-	img, err = load_from_stream(ctx, options, allocator);
+	img, err = load_from_context(ctx, options, allocator);
 
 	return img, err;
 }
@@ -386,7 +380,7 @@ load_from_file :: proc(filename: string, options := Options{}, allocator := cont
 	}
 }
 
-load_from_stream :: proc(ctx: ^compress.Context, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
 	options := options;
 	if .info in options {
 		options |= {.return_metadata, .do_not_decompress_image};
@@ -1657,4 +1651,4 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^IHDR, option
 	return nil;
 }
 
-load :: proc{load_from_file, load_from_slice, load_from_stream};
+load :: proc{load_from_file, load_from_slice, load_from_context};
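
A usage sketch of the PNG slice path after this change; the filename is a placeholder:

	package example

	import "core:fmt"
	import "core:image/png"
	import "core:os"

	main :: proc() {
		data, ok := os.read_entire_file("example.png");
		if !ok {
			return;
		}
		defer delete(data);

		// load resolves to load_from_slice, which now wraps the slice in
		// a compress.Context_Memory_Input rather than routing it through
		// a bytes.Reader stream, then calls load_from_context.
		img, err := png.load(data);
		if err != nil {
			fmt.printf("png error: %v\n", err);
			return;
		}
		fmt.printf("loaded %v x %v image\n", img.width, img.height);
	}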