Browse Source

core/crypto: Stop using context.temp_allocator

The max digest size for the foreseeable future will be 512 bits, and the
max block size is currently 1152 bits (SHA3-224).  If people add more
exotic hash algorithms without bumping the constants when required,
tests will fail.

The stream buffer will currently be 576 bytes, which is "fine" to just
stick on the stack, and is a sensible multiple of the more common block
size of 64 bytes.
Yawning Angel 1 year ago
parent
commit
44758f2a60

+ 4 - 8
core/crypto/hash/hash.odin

@@ -56,17 +56,13 @@ hash_stream :: proc(
 ) {
 	ctx: Context
 
-	init(&ctx, algorithm)
+	buf: [MAX_BLOCK_SIZE * 4]byte
+	defer mem.zero_explicit(&buf, size_of(buf))
 
-	buffer_size := block_size(&ctx) * 4
-	buf := make([]byte, buffer_size, context.temp_allocator)
-	defer {
-		mem.zero_explicit(raw_data(buf), buffer_size)
-		delete(buf, context.temp_allocator)
-	}
+	init(&ctx, algorithm)
 
 	loop: for {
-		n, err := io.read(s, buf)
+		n, err := io.read(s, buf[:])
 		if n > 0 {
 			// XXX/yawning: Can io.read return n > 0 and EOF?
 			update(&ctx, buf[:n])

+ 7 - 0
core/crypto/hash/low_level.odin

@@ -11,6 +11,13 @@ import "core:crypto/legacy/sha1"
 
 import "core:reflect"
 
+// MAX_DIGEST_SIZE is the maximum size digest that can be returned by any
+// of the Algorithms supported via this package.
+MAX_DIGEST_SIZE :: 64
+// MAX_BLOCK_SIZE is the maximum block size used by any of the Algorithms
+// supported by this package.
+MAX_BLOCK_SIZE :: sha3.BLOCK_SIZE_224
+
 // Algorithm is the algorithm identifier associated with a given Context.
 Algorithm :: enum {
 	Invalid,

+ 6 - 7
core/crypto/hmac/hmac.odin

@@ -6,7 +6,6 @@ See:
 */
 package hmac
 
-import "base:runtime"
 import "core:crypto"
 import "core:crypto/hash"
 import "core:mem"
@@ -26,10 +25,9 @@ sum :: proc(algorithm: hash.Algorithm, dst, msg, key: []byte) {
 // and key over msg and return true iff the tag is valid.  It requires
 // that the tag is correctly sized.
 verify :: proc(algorithm: hash.Algorithm, tag, msg, key: []byte) -> bool {
-	runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
-	tag_sz := hash.DIGEST_SIZES[algorithm]
+	tag_buf: [hash.MAX_DIGEST_SIZE]byte
 
-	derived_tag := make([]byte, tag_sz, context.temp_allocator)
+	derived_tag := tag_buf[:hash.DIGEST_SIZES[algorithm]]
 	sum(algorithm, derived_tag, msg, key)
 
 	return crypto.compare_constant_time(derived_tag, tag) == 1
@@ -113,11 +111,12 @@ _O_PAD :: 0x5c
 
 @(private)
 _init_hashes :: proc(ctx: ^Context, algorithm: hash.Algorithm, key: []byte) {
-	runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
+	K0_buf: [hash.MAX_BLOCK_SIZE]byte
+	kPad_buf: [hash.MAX_BLOCK_SIZE]byte
 
 	kLen := len(key)
 	B := hash.BLOCK_SIZES[algorithm]
-	K0 := make([]byte, B, context.temp_allocator)
+	K0 := K0_buf[:B]
 	defer mem.zero_explicit(raw_data(K0), B)
 
 	switch {
@@ -148,7 +147,7 @@ _init_hashes :: proc(ctx: ^Context, algorithm: hash.Algorithm, key: []byte) {
 	hash.init(&ctx._o_hash, algorithm)
 	hash.init(&ctx._i_hash, algorithm)
 
-	kPad := make([]byte, B, context.temp_allocator)
+	kPad := kPad_buf[:B]
 	defer mem.zero_explicit(raw_data(kPad), B)
 
 	for v, i in K0 {

+ 25 - 0
tests/core/crypto/test_core_crypto_hash.odin

@@ -514,6 +514,31 @@ test_hash :: proc(t: ^testing.T) {
 
 		algo_name := hash.ALGORITHM_NAMES[algo]
 
+		// Ensure that the MAX_(DIGEST_SIZE, BLOCK_SIZE) constants are
+		// still correct.
+		digest_sz := hash.DIGEST_SIZES[algo]
+		block_sz := hash.BLOCK_SIZES[algo]
+		expect(
+			t,
+			digest_sz <= hash.MAX_DIGEST_SIZE,
+			fmt.tprintf(
+				"%s: Digest size %d exceeds max %d",
+				algo_name,
+				digest_sz,
+				hash.MAX_DIGEST_SIZE,
+			),
+		)
+		expect(
+			t,
+			block_sz <= hash.MAX_BLOCK_SIZE,
+			fmt.tprintf(
+				"%s: Block size %d exceeds max %d",
+				algo_name,
+				block_sz,
+				hash.MAX_BLOCK_SIZE,
+			),
+		)
+
 		// Exercise most of the happy-path for the high level interface.
 		rd: bytes.Reader
 		bytes.reader_init(&rd, transmute([]byte)(data_1_000_000_a))