core/crypto/aes: Use NIST terminology for the IV

Yawning Angel 1 year ago
parent
commit 708f053fe6
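
In brief, the renamed public surface (a summary of the diff below, assuming the package is imported as aes; the call sites shown are hypothetical):

	aes.GCM_IV_SIZE                                          // was aes.GCM_NONCE_SIZE, still 12 bytes
	aes.GCM_IV_SIZE_MAX                                      // was aes.GCM_NONCE_SIZE_MAX
	aes.seal_gcm(&ctx, dst, tag, iv, aad, plaintext)         // parameter "nonce" is now "iv"
	ok := aes.open_gcm(&ctx, dst, iv, aad, ciphertext, tag)  // likewise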

+ 25 - 25
core/crypto/aes/aes_gcm.odin

@@ -7,10 +7,10 @@ import "core:crypto/_aes/ct64"
 import "core:encoding/endian"
 import "core:encoding/endian"
 import "core:mem"
 import "core:mem"
 
 
-// GCM_NONCE_SIZE is the default size of the GCM nonce in bytes.
-GCM_NONCE_SIZE :: 12
-// GCM_NONCE_SIZE_MAX is the maximum size of the GCM nonce in bytes.
-GCM_NONCE_SIZE_MAX :: 0x2000000000000000 // floor((2^64 - 1) / 8) bits
+// GCM_IV_SIZE is the default size of the GCM IV in bytes.
+GCM_IV_SIZE :: 12
+// GCM_IV_SIZE_MAX is the maximum size of the GCM IV in bytes.
+GCM_IV_SIZE_MAX :: 0x2000000000000000 // floor((2^64 - 1) / 8) bits
 // GCM_TAG_SIZE is the size of a GCM tag in bytes.
 // GCM_TAG_SIZE is the size of a GCM tag in bytes.
 GCM_TAG_SIZE :: _aes.GHASH_TAG_SIZE
 GCM_TAG_SIZE :: _aes.GHASH_TAG_SIZE
 
 
@@ -32,13 +32,13 @@ init_gcm :: proc(ctx: ^Context_GCM, key: []byte, impl := Implementation.Hardware
 }
 
 // seal_gcm encrypts the plaintext and authenticates the aad and ciphertext,
-// with the provided Context_GCM and nonce, stores the output in dst and tag.
+// with the provided Context_GCM and iv, stores the output in dst and tag.
 //
 // dst and plaintext MUST alias exactly or not at all.
-seal_gcm :: proc(ctx: ^Context_GCM, dst, tag, nonce, aad, plaintext: []byte) {
+seal_gcm :: proc(ctx: ^Context_GCM, dst, tag, iv, aad, plaintext: []byte) {
 	assert(ctx._is_initialized)
 
-	gcm_validate_common_slice_sizes(tag, nonce, aad, plaintext)
+	gcm_validate_common_slice_sizes(tag, iv, aad, plaintext)
 	if len(dst) != len(plaintext) {
 		panic("crypto/aes: invalid destination ciphertext size")
 	}
@@ -47,7 +47,7 @@ seal_gcm :: proc(ctx: ^Context_GCM, dst, tag, nonce, aad, plaintext: []byte) {
 	}
 
 	if impl, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
-		gcm_seal_hw(&impl, dst, tag, nonce, aad, plaintext)
+		gcm_seal_hw(&impl, dst, tag, iv, aad, plaintext)
 		return
 	}
 
@@ -55,7 +55,7 @@ seal_gcm :: proc(ctx: ^Context_GCM, dst, tag, nonce, aad, plaintext: []byte) {
 	j0: [_aes.GHASH_BLOCK_SIZE]byte
 	j0_enc: [_aes.GHASH_BLOCK_SIZE]byte
 	s: [_aes.GHASH_TAG_SIZE]byte
-	init_ghash_ct64(ctx, &h, &j0, &j0_enc, nonce)
+	init_ghash_ct64(ctx, &h, &j0, &j0_enc, iv)
 
 	// Note: Our GHASH implementation handles appending padding.
 	ct64.ghash(s[:], h[:], aad)
@@ -69,15 +69,15 @@ seal_gcm :: proc(ctx: ^Context_GCM, dst, tag, nonce, aad, plaintext: []byte) {
 }
 
 // open_gcm authenticates the aad and ciphertext, and decrypts the ciphertext,
-// with the provided Context_GCM, nonce, and tag, and stores the output in dst,
+// with the provided Context_GCM, iv, and tag, and stores the output in dst,
 // returning true iff the authentication was successful.  If authentication
 // fails, the destination buffer will be zeroed.
 //
 // dst and plaintext MUST alias exactly or not at all.
-open_gcm :: proc(ctx: ^Context_GCM, dst, nonce, aad, ciphertext, tag: []byte) -> bool {
+open_gcm :: proc(ctx: ^Context_GCM, dst, iv, aad, ciphertext, tag: []byte) -> bool {
 	assert(ctx._is_initialized)
 
-	gcm_validate_common_slice_sizes(tag, nonce, aad, ciphertext)
+	gcm_validate_common_slice_sizes(tag, iv, aad, ciphertext)
 	if len(dst) != len(ciphertext) {
 		panic("crypto/aes: invalid destination plaintext size")
 	}
@@ -86,14 +86,14 @@ open_gcm :: proc(ctx: ^Context_GCM, dst, nonce, aad, ciphertext, tag: []byte) ->
 	}
 
 	if impl, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
-		return gcm_open_hw(&impl, dst, nonce, aad, ciphertext, tag)
+		return gcm_open_hw(&impl, dst, iv, aad, ciphertext, tag)
 	}
 
 	h: [_aes.GHASH_KEY_SIZE]byte
 	j0: [_aes.GHASH_BLOCK_SIZE]byte
 	j0_enc: [_aes.GHASH_BLOCK_SIZE]byte
 	s: [_aes.GHASH_TAG_SIZE]byte
-	init_ghash_ct64(ctx, &h, &j0, &j0_enc, nonce)
+	init_ghash_ct64(ctx, &h, &j0, &j0_enc, iv)
 
 	ct64.ghash(s[:], h[:], aad)
 	gctr_ct64(ctx, dst, &s, ciphertext, &h, &j0, false)
@@ -120,14 +120,14 @@ reset_gcm :: proc "contextless" (ctx: ^Context_GCM) {
 }
 
 @(private = "file")
-gcm_validate_common_slice_sizes :: proc(tag, nonce, aad, text: []byte) {
+gcm_validate_common_slice_sizes :: proc(tag, iv, aad, text: []byte) {
 	if len(tag) != GCM_TAG_SIZE {
 		panic("crypto/aes: invalid GCM tag size")
 	}
 
-	// The specification supports nonces in the range [1, 2^64) bits.
-	if l := len(nonce); l == 0 || u64(l) >= GCM_NONCE_SIZE_MAX {
-		panic("crypto/aes: invalid GCM nonce size")
+	// The specification supports IVs in the range [1, 2^64) bits.
+	if l := len(iv); l == 0 || u64(l) >= GCM_IV_SIZE_MAX {
+		panic("crypto/aes: invalid GCM IV size")
 	}
 
 	if aad_len := u64(len(aad)); aad_len > GCM_A_MAX {
@@ -144,7 +144,7 @@ init_ghash_ct64 :: proc(
 	h: ^[_aes.GHASH_KEY_SIZE]byte,
 	j0: ^[_aes.GHASH_BLOCK_SIZE]byte,
 	j0_enc: ^[_aes.GHASH_BLOCK_SIZE]byte,
-	nonce: []byte,
+	iv: []byte,
 ) {
 	impl := &ctx._impl.(ct64.Context)
 
@@ -152,14 +152,14 @@ init_ghash_ct64 :: proc(
 	ct64.encrypt_block(impl, h[:], h[:])
 
 	// Define a block, J0, as follows:
-	if l := len(nonce); l == GCM_NONCE_SIZE {
+	if l := len(iv); l == GCM_IV_SIZE {
 		// if len(IV) = 96, then let J0 = IV || 0^31 || 1
-		copy(j0[:], nonce)
+		copy(j0[:], iv)
 		j0[_aes.GHASH_BLOCK_SIZE - 1] = 1
 	} else {
 		// If len(IV) != 96, then let s = 128 ceil(len(IV)/128) - len(IV),
 		// and let J0 = GHASHH(IV || 0^(s+64) || ceil(len(IV))^64).
-		ct64.ghash(j0[:], h[:], nonce)
+		ct64.ghash(j0[:], h[:], iv)
 
 		tmp: [_aes.GHASH_BLOCK_SIZE]byte
 		endian.unchecked_put_u64be(tmp[8:], u64(l) * 8)
@@ -197,7 +197,7 @@ gctr_ct64 :: proc(
 	s: ^[_aes.GHASH_BLOCK_SIZE]byte,
 	src: []byte,
 	h: ^[_aes.GHASH_KEY_SIZE]byte,
-	nonce: ^[_aes.GHASH_BLOCK_SIZE]byte,
+	iv: ^[_aes.GHASH_BLOCK_SIZE]byte,
 	is_seal: bool,
 ) #no_bounds_check {
 	ct64_inc_ctr32 := #force_inline proc "contextless" (dst: []byte, ctr: u32) -> u32 {
@@ -208,14 +208,14 @@ gctr_ct64 :: proc(
 	// Setup the counter blocks.
 	tmp, tmp2: [ct64.STRIDE][BLOCK_SIZE]byte = ---, ---
 	ctrs, blks: [ct64.STRIDE][]byte = ---, ---
-	ctr := endian.unchecked_get_u32be(nonce[GCM_NONCE_SIZE:]) + 1
+	ctr := endian.unchecked_get_u32be(iv[GCM_IV_SIZE:]) + 1
 	for i in 0 ..< ct64.STRIDE {
 		// Setup scratch space for the keystream.
 		blks[i] = tmp2[i][:]
 
 		// Pre-copy the IV to all the counter blocks.
 		ctrs[i] = tmp[i][:]
-		copy(ctrs[i], nonce[:GCM_NONCE_SIZE])
+		copy(ctrs[i], iv[:GCM_IV_SIZE])
 	}
 
 	impl := &ctx._impl.(ct64.Context)
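
For reference, the J0 and counter-block derivations that the comments in this file and in the Intel backend paraphrase are Algorithm 4 and Section 6.2 of NIST SP 800-38D, in the spec's own notation:

	J0 = IV || 0^31 || 1                          if len(IV) = 96 bits
	J0 = GHASH_H(IV || 0^(s+64) || [len(IV)]_64)  otherwise, with s = 128*ceil(len(IV)/128) - len(IV)

	CB_1 = inc_32(J0),  CB_(i+1) = inc_32(CB_i)   where inc_32 increments only the last 32 bits, mod 2^32

[len(IV)]_64 is the IV's bit length as a 64-bit big-endian integer, which is what endian.unchecked_put_u64be(tmp[8:], u64(l) * 8) encodes above; the inc_32 definition is why gctr_ct64 rewrites only the final 4 bytes of each counter block.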

+ 11 - 11
core/crypto/aes/aes_gcm_hw_intel.odin

@@ -10,12 +10,12 @@ import "core:mem"
 import "core:simd/x86"
 import "core:simd/x86"
 
 
 @(private)
 @(private)
-gcm_seal_hw :: proc(ctx: ^Context_Impl_Hardware, dst, tag, nonce, aad, plaintext: []byte) {
+gcm_seal_hw :: proc(ctx: ^Context_Impl_Hardware, dst, tag, iv, aad, plaintext: []byte) {
 	h: [_aes.GHASH_KEY_SIZE]byte
 	h: [_aes.GHASH_KEY_SIZE]byte
 	j0: [_aes.GHASH_BLOCK_SIZE]byte
 	j0: [_aes.GHASH_BLOCK_SIZE]byte
 	j0_enc: [_aes.GHASH_BLOCK_SIZE]byte
 	j0_enc: [_aes.GHASH_BLOCK_SIZE]byte
 	s: [_aes.GHASH_TAG_SIZE]byte
 	s: [_aes.GHASH_TAG_SIZE]byte
-	init_ghash_hw(ctx, &h, &j0, &j0_enc, nonce)
+	init_ghash_hw(ctx, &h, &j0, &j0_enc, iv)
 
 
 	// Note: Our GHASH implementation handles appending padding.
 	// Note: Our GHASH implementation handles appending padding.
 	hw_intel.ghash(s[:], h[:], aad)
 	hw_intel.ghash(s[:], h[:], aad)
@@ -29,12 +29,12 @@ gcm_seal_hw :: proc(ctx: ^Context_Impl_Hardware, dst, tag, nonce, aad, plaintext
 }
 
 @(private)
-gcm_open_hw :: proc(ctx: ^Context_Impl_Hardware, dst, nonce, aad, ciphertext, tag: []byte) -> bool {
+gcm_open_hw :: proc(ctx: ^Context_Impl_Hardware, dst, iv, aad, ciphertext, tag: []byte) -> bool {
 	h: [_aes.GHASH_KEY_SIZE]byte
 	j0: [_aes.GHASH_BLOCK_SIZE]byte
 	j0_enc: [_aes.GHASH_BLOCK_SIZE]byte
 	s: [_aes.GHASH_TAG_SIZE]byte
-	init_ghash_hw(ctx, &h, &j0, &j0_enc, nonce)
+	init_ghash_hw(ctx, &h, &j0, &j0_enc, iv)
 
 	hw_intel.ghash(s[:], h[:], aad)
 	gctr_hw(ctx, dst, &s, ciphertext, &h, &j0, false)
@@ -59,20 +59,20 @@ init_ghash_hw :: proc(
 	h: ^[_aes.GHASH_KEY_SIZE]byte,
 	j0: ^[_aes.GHASH_BLOCK_SIZE]byte,
 	j0_enc: ^[_aes.GHASH_BLOCK_SIZE]byte,
-	nonce: []byte,
+	iv: []byte,
 ) {
 	// 1. Let H = CIPH(k, 0^128)
 	encrypt_block_hw(ctx, h[:], h[:])
 
 	// Define a block, J0, as follows:
-	if l := len(nonce); l == GCM_NONCE_SIZE {
+	if l := len(iv); l == GCM_IV_SIZE {
 		// if len(IV) = 96, then let J0 = IV || 0^31 || 1
-		copy(j0[:], nonce)
+		copy(j0[:], iv)
 		j0[_aes.GHASH_BLOCK_SIZE - 1] = 1
 	} else {
 		// If len(IV) != 96, then let s = 128 ceil(len(IV)/128) - len(IV),
 		// and let J0 = GHASHH(IV || 0^(s+64) || ceil(len(IV))^64).
-		hw_intel.ghash(j0[:], h[:], nonce)
+		hw_intel.ghash(j0[:], h[:], iv)
 
 		tmp: [_aes.GHASH_BLOCK_SIZE]byte
 		endian.unchecked_put_u64be(tmp[8:], u64(l) * 8)
@@ -109,7 +109,7 @@ gctr_hw :: proc(
 	s: ^[_aes.GHASH_BLOCK_SIZE]byte,
 	src: []byte,
 	h: ^[_aes.GHASH_KEY_SIZE]byte,
-	nonce: ^[_aes.GHASH_BLOCK_SIZE]byte,
+	iv: ^[_aes.GHASH_BLOCK_SIZE]byte,
 	is_seal: bool,
 ) #no_bounds_check {
 	sks: [15]x86.__m128i = ---
@@ -118,8 +118,8 @@ gctr_hw :: proc(
 	}
 
 	// Setup the counter block
-	ctr_blk := intrinsics.unaligned_load((^x86.__m128i)(nonce))
-	ctr := endian.unchecked_get_u32be(nonce[GCM_NONCE_SIZE:]) + 1
+	ctr_blk := intrinsics.unaligned_load((^x86.__m128i)(iv))
+	ctr := endian.unchecked_get_u32be(iv[GCM_IV_SIZE:]) + 1
 
 	src, dst := src, dst
 

+ 2 - 2
core/crypto/aes/aes_impl_hw_gen.odin

@@ -34,11 +34,11 @@ ctr_blocks_hw :: proc(ctx: ^Context_CTR, dst, src: []byte, nr_blocks: int) {
 }
 
 @(private)
-gcm_seal_hw :: proc(ctx: ^Context_Impl_Hardware, dst, tag, nonce, aad, plaintext: []byte) {
+gcm_seal_hw :: proc(ctx: ^Context_Impl_Hardware, dst, tag, iv, aad, plaintext: []byte) {
 	panic(ERR_HW_NOT_SUPPORTED)
 }
 
 @(private)
-gcm_open_hw :: proc(ctx: ^Context_Impl_Hardware, dst, nonce, aad, ciphertext, tag: []byte) -> bool {
+gcm_open_hw :: proc(ctx: ^Context_Impl_Hardware, dst, iv, aad, ciphertext, tag: []byte) -> bool {
 	panic(ERR_HW_NOT_SUPPORTED)
 }

+ 2 - 2
tests/core/crypto/test_core_crypto_aes.odin

@@ -197,8 +197,8 @@ test_aes_ctr :: proc(t: ^testing.T, impl: aes.Implementation) {
 
 	ctx: aes.Context_CTR
 	key: [aes.KEY_SIZE_256]byte
-	nonce: [aes.CTR_IV_SIZE]byte
-	aes.init_ctr(&ctx, key[:], nonce[:], impl)
+	iv: [aes.CTR_IV_SIZE]byte
+	aes.init_ctr(&ctx, key[:], iv[:], impl)
 
 	h_ctx: sha2.Context_512
 	sha2.init_512_256(&h_ctx)
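
Putting the renamed GCM API together, a seal/open round trip might look like the following sketch (illustration only, not part of this commit; the all-zero key and IV are placeholders, and a real caller must use a unique IV per message under a given key):

	package gcm_example

	import "core:crypto/aes"

	main :: proc() {
		ctx: aes.Context_GCM
		key: [aes.KEY_SIZE_256]byte // placeholder all-zero key
		iv: [aes.GCM_IV_SIZE]byte   // placeholder 96-bit IV, the fast-path size
		tag: [aes.GCM_TAG_SIZE]byte

		aes.init_gcm(&ctx, key[:])
		defer aes.reset_gcm(&ctx)

		plaintext, ciphertext, recovered: [16]byte
		aes.seal_gcm(&ctx, ciphertext[:], tag[:], iv[:], nil, plaintext[:])

		ok := aes.open_gcm(&ctx, recovered[:], iv[:], nil, ciphertext[:], tag[:])
		assert(ok, "GCM tag verification failed")
	}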