Browse Source

Check for unneeded `transmute` with `-vet-cast`

gingerBill 1 year ago
parent
commit
4b71c47fd5

+ 1 - 1
base/runtime/random_generator.odin

@@ -89,7 +89,7 @@ default_random_generator_proc :: proc(data: rawptr, mode: Random_Generator_Mode,
 		switch len(p) {
 		case size_of(u64):
 			// Fast path for a 64-bit destination.
-			intrinsics.unaligned_store(transmute(^u64)raw_data(p), read_u64(r))
+			intrinsics.unaligned_store((^u64)(raw_data(p)), read_u64(r))
 		case:
 			// All other cases.
 			pos := i8(0)

+ 3 - 6
core/compress/shoco/shoco.odin

@@ -274,12 +274,9 @@ compress_string_to_buffer :: proc(input: string, output: []u8, model := DEFAULT_
 				out_ptr := raw_data(output[out:])
 
 				switch pack.bytes_packed {
-				case 4:
-					intrinsics.unaligned_store(transmute(^u32)out_ptr, code)
-				case 2:
-					intrinsics.unaligned_store(transmute(^u16)out_ptr, u16(code))
-				case 1:
-					intrinsics.unaligned_store(transmute(^u8)out_ptr,  u8(code))
+				case 4: intrinsics.unaligned_store((^u32)(out_ptr), code)
+				case 2: intrinsics.unaligned_store((^u16)(out_ptr), u16(code))
+				case 1: intrinsics.unaligned_store( (^u8)(out_ptr),  u8(code))
 				case:
 					return out, .Unknown_Compression_Method
 				}

+ 2 - 2
core/crypto/_edwards25519/edwards25519.odin

@@ -110,7 +110,7 @@ ge_set_bytes :: proc "contextless" (ge: ^Group_Element, b: []byte) -> bool {
 	if len(b) != 32 {
 		intrinsics.trap()
 	}
-	b_ := transmute(^[32]byte)(raw_data(b))
+	b_ := (^[32]byte)(raw_data(b))
 
 	// Do the work in a scratch element, so that ge is unchanged on
 	// failure.
@@ -169,7 +169,7 @@ ge_bytes :: proc "contextless" (ge: ^Group_Element, dst: []byte) {
 	if len(dst) != 32 {
 		intrinsics.trap()
 	}
-	dst_ := transmute(^[32]byte)(raw_data(dst))
+	dst_ := (^[32]byte)(raw_data(dst))
 
 	// Convert the element to affine (x, y) representation.
 	x, y, z_inv: field.Tight_Field_Element = ---, ---, ---

+ 2 - 2
core/crypto/_edwards25519/edwards25519_scalar.odin

@@ -28,7 +28,7 @@ sc_set_bytes :: proc "contextless" (sc: ^Scalar, b: []byte) -> bool {
 	if len(b) != 32 {
 		intrinsics.trap()
 	}
-	b_ := transmute(^[32]byte)(raw_data(b))
+	b_ := (^[32]byte)(raw_data(b))
 	return field.fe_from_bytes(sc, b_)
 }
 
@@ -36,7 +36,7 @@ sc_set_bytes_rfc8032 :: proc "contextless" (sc: ^Scalar, b: []byte) {
 	if len(b) != 32 {
 		intrinsics.trap()
 	}
-	b_ := transmute(^[32]byte)(raw_data(b))
+	b_ := (^[32]byte)(raw_data(b))
 	field.fe_from_bytes_rfc8032(sc, b_)
 }
 

+ 2 - 2
core/crypto/_fiat/field_curve25519/field.odin

@@ -6,13 +6,13 @@ import "core:mem"
 fe_relax_cast :: #force_inline proc "contextless" (
 	arg1: ^Tight_Field_Element,
 ) -> ^Loose_Field_Element {
-	return transmute(^Loose_Field_Element)(arg1)
+	return (^Loose_Field_Element)(arg1)
 }
 
 fe_tighten_cast :: #force_inline proc "contextless" (
 	arg1: ^Loose_Field_Element,
 ) -> ^Tight_Field_Element {
-	return transmute(^Tight_Field_Element)(arg1)
+	return (^Tight_Field_Element)(arg1)
 }
 
 fe_clear :: proc "contextless" (

+ 2 - 2
core/crypto/_fiat/field_poly1305/field.odin

@@ -7,13 +7,13 @@ import "core:mem"
 fe_relax_cast :: #force_inline proc "contextless" (
 	arg1: ^Tight_Field_Element,
 ) -> ^Loose_Field_Element {
-	return transmute(^Loose_Field_Element)(arg1)
+	return (^Loose_Field_Element)(arg1)
 }
 
 fe_tighten_cast :: #force_inline proc "contextless" (
 	arg1: ^Loose_Field_Element,
 ) -> ^Tight_Field_Element {
-	return transmute(^Tight_Field_Element)(arg1)
+	return (^Tight_Field_Element)(arg1)
 }
 
 fe_from_bytes :: #force_inline proc "contextless" (

+ 5 - 5
core/crypto/kmac/kmac.odin

@@ -61,7 +61,7 @@ init_256 :: proc(ctx: ^Context, key, domain_sep: []byte) {
 update :: proc(ctx: ^Context, data: []byte) {
 	assert(ctx.is_initialized)
 
-	shake.write(transmute(^shake.Context)(ctx), data)
+	shake.write((^shake.Context)(ctx), data)
 }
 
 // final finalizes the Context, writes the tag to dst, and calls reset
@@ -75,7 +75,7 @@ final :: proc(ctx: ^Context, dst: []byte) {
 		panic("crypto/kmac: invalid KMAC tag_size, too short")
 	}
 
-	_sha3.final_cshake(transmute(^_sha3.Context)(ctx), dst)
+	_sha3.final_cshake((^_sha3.Context)(ctx), dst)
 }
 
 // clone clones the Context other into ctx.
@@ -84,7 +84,7 @@ clone :: proc(ctx, other: ^Context) {
 		return
 	}
 
-	shake.clone(transmute(^shake.Context)(ctx), transmute(^shake.Context)(other))
+	shake.clone((^shake.Context)(ctx), (^shake.Context)(other))
 }
 
 // reset sanitizes the Context.  The Context must be re-initialized to
@@ -94,7 +94,7 @@ reset :: proc(ctx: ^Context) {
 		return
 	}
 
-	shake.reset(transmute(^shake.Context)(ctx))
+	shake.reset((^shake.Context)(ctx))
 }
 
 @(private)
@@ -107,7 +107,7 @@ _init_kmac :: proc(ctx: ^Context, key, s: []byte, sec_strength: int) {
 		panic("crypto/kmac: invalid KMAC key, too short")
 	}
 
-	ctx_ := transmute(^_sha3.Context)(ctx)
+	ctx_ := (^_sha3.Context)(ctx)
 	_sha3.init_cshake(ctx_, N_KMAC, s, sec_strength)
 	_sha3.bytepad(ctx_, [][]byte{key}, _sha3.rate_cshake(sec_strength))
 }

+ 5 - 5
core/crypto/legacy/keccak/keccak.odin

@@ -66,12 +66,12 @@ init_512 :: proc(ctx: ^Context) {
 @(private)
 _init :: proc(ctx: ^Context) {
 	ctx.dsbyte = _sha3.DS_KECCAK
-	_sha3.init(transmute(^_sha3.Context)(ctx))
+	_sha3.init((^_sha3.Context)(ctx))
 }
 
 // update adds more data to the Context.
 update :: proc(ctx: ^Context, data: []byte) {
-	_sha3.update(transmute(^_sha3.Context)(ctx), data)
+	_sha3.update((^_sha3.Context)(ctx), data)
 }
 
 // final finalizes the Context, writes the digest to hash, and calls
@@ -80,16 +80,16 @@ update :: proc(ctx: ^Context, data: []byte) {
 // Iff finalize_clone is set, final will work on a copy of the Context,
 // which is useful for calculating rolling digests.
 final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
-	_sha3.final(transmute(^_sha3.Context)(ctx), hash, finalize_clone)
+	_sha3.final((^_sha3.Context)(ctx), hash, finalize_clone)
 }
 
 // clone clones the Context other into ctx.
 clone :: proc(ctx, other: ^Context) {
-	_sha3.clone(transmute(^_sha3.Context)(ctx), transmute(^_sha3.Context)(other))
+	_sha3.clone((^_sha3.Context)(ctx), (^_sha3.Context)(other))
 }
 
 // reset sanitizes the Context.  The Context must be re-initialized to
 // be used again.
 reset :: proc(ctx: ^Context) {
-	_sha3.reset(transmute(^_sha3.Context)(ctx))
+	_sha3.reset((^_sha3.Context)(ctx))
 }

+ 3 - 3
core/crypto/ristretto255/ristretto255.odin

@@ -112,7 +112,7 @@ ge_set_bytes :: proc "contextless" (ge: ^Group_Element, b: []byte) -> bool {
 		return false
 	}
 
-	b_ := transmute(^[32]byte)(raw_data(b))
+	b_ := (^[32]byte)(raw_data(b))
 
 	s: field.Tight_Field_Element = ---
 	defer field.fe_clear(&s)
@@ -297,7 +297,7 @@ ge_bytes :: proc(ge: ^Group_Element, dst: []byte) {
 	// 2.  Return the 32-byte little-endian encoding of s.  More
 	// specifically, this is the encoding of the canonical
 	// representation of s as an integer between 0 and p-1, inclusive.
-	dst_ := transmute(^[32]byte)(raw_data(dst))
+	dst_ := (^[32]byte)(raw_data(dst))
 	field.fe_to_bytes(dst_, &tmp)
 
 	field.fe_clear_vec([]^field.Tight_Field_Element{&u1, &u2, &tmp, &z_inv, &ix0, &iy0, &x, &y})
@@ -417,7 +417,7 @@ ge_is_identity :: proc(ge: ^Group_Element) -> int {
 
 @(private)
 ge_map :: proc "contextless" (ge: ^Group_Element, b: []byte) {
-	b_ := transmute(^[32]byte)(raw_data(b))
+	b_ := (^[32]byte)(raw_data(b))
 
 	// The MAP function is defined on 32-byte strings as:
 	//

+ 1 - 1
core/crypto/ristretto255/ristretto255_scalar.odin

@@ -46,7 +46,7 @@ sc_set_bytes_wide :: proc(sc: ^Scalar, b: []byte) {
 		panic("crypto/ristretto255: invalid wide input size")
 	}
 
-	b_ := transmute(^[WIDE_SCALAR_SIZE]byte)(raw_data(b))
+	b_ := (^[WIDE_SCALAR_SIZE]byte)(raw_data(b))
 	grp.sc_set_bytes_wide(sc, b_)
 }
 

+ 5 - 5
core/crypto/sha3/sha3.odin

@@ -68,12 +68,12 @@ init_512 :: proc(ctx: ^Context) {
 @(private)
 _init :: proc(ctx: ^Context) {
 	ctx.dsbyte = _sha3.DS_SHA3
-	_sha3.init(transmute(^_sha3.Context)(ctx))
+	_sha3.init((^_sha3.Context)(ctx))
 }
 
 // update adds more data to the Context.
 update :: proc(ctx: ^Context, data: []byte) {
-	_sha3.update(transmute(^_sha3.Context)(ctx), data)
+	_sha3.update((^_sha3.Context)(ctx), data)
 }
 
 // final finalizes the Context, writes the digest to hash, and calls
@@ -82,16 +82,16 @@ update :: proc(ctx: ^Context, data: []byte) {
 // Iff finalize_clone is set, final will work on a copy of the Context,
 // which is useful for calculating rolling digests.
 final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
-	_sha3.final(transmute(^_sha3.Context)(ctx), hash, finalize_clone)
+	_sha3.final((^_sha3.Context)(ctx), hash, finalize_clone)
 }
 
 // clone clones the Context other into ctx.
 clone :: proc(ctx, other: ^Context) {
-	_sha3.clone(transmute(^_sha3.Context)(ctx), transmute(^_sha3.Context)(other))
+	_sha3.clone((^_sha3.Context)(ctx), (^_sha3.Context)(other))
 }
 
 // reset sanitizes the Context.  The Context must be re-initialized to
 // be used again.
 reset :: proc(ctx: ^Context) {
-	_sha3.reset(transmute(^_sha3.Context)(ctx))
+	_sha3.reset((^_sha3.Context)(ctx))
 }

+ 8 - 8
core/crypto/shake/shake.odin

@@ -24,35 +24,35 @@ Context :: distinct _sha3.Context
 
 // init_128 initializes a Context for SHAKE128.
 init_128 :: proc(ctx: ^Context) {
-	_sha3.init_cshake(transmute(^_sha3.Context)(ctx), nil, nil, 128)
+	_sha3.init_cshake((^_sha3.Context)(ctx), nil, nil, 128)
 }
 
 // init_256 initializes a Context for SHAKE256.
 init_256 :: proc(ctx: ^Context) {
-	_sha3.init_cshake(transmute(^_sha3.Context)(ctx), nil, nil, 256)
+	_sha3.init_cshake((^_sha3.Context)(ctx), nil, nil, 256)
 }
 
 // init_cshake_128 initializes a Context for cSHAKE128.
 init_cshake_128 :: proc(ctx: ^Context, domain_sep: []byte) {
-	_sha3.init_cshake(transmute(^_sha3.Context)(ctx), nil, domain_sep, 128)
+	_sha3.init_cshake((^_sha3.Context)(ctx), nil, domain_sep, 128)
 }
 
 // init_cshake_256 initializes a Context for cSHAKE256.
 init_cshake_256 :: proc(ctx: ^Context, domain_sep: []byte) {
-	_sha3.init_cshake(transmute(^_sha3.Context)(ctx), nil, domain_sep, 256)
+	_sha3.init_cshake((^_sha3.Context)(ctx), nil, domain_sep, 256)
 }
 
 // write writes more data into the SHAKE instance.  This MUST not be called
 // after any reads have been done, and attempts to do so will panic.
 write :: proc(ctx: ^Context, data: []byte) {
-	_sha3.update(transmute(^_sha3.Context)(ctx), data)
+	_sha3.update((^_sha3.Context)(ctx), data)
 }
 
 // read reads output from the SHAKE instance.  There is no practical upper
 // limit to the amount of data that can be read from SHAKE.  After read has
 // been called one or more times, further calls to write will panic.
 read :: proc(ctx: ^Context, dst: []byte) {
-	ctx_ := transmute(^_sha3.Context)(ctx)
+	ctx_ := (^_sha3.Context)(ctx)
 	if !ctx.is_finalized {
 		_sha3.shake_xof(ctx_)
 	}
@@ -62,11 +62,11 @@ read :: proc(ctx: ^Context, dst: []byte) {
 
 // clone clones the Context other into ctx.
 clone :: proc(ctx, other: ^Context) {
-	_sha3.clone(transmute(^_sha3.Context)(ctx), transmute(^_sha3.Context)(other))
+	_sha3.clone((^_sha3.Context)(ctx), (^_sha3.Context)(other))
 }
 
 // reset sanitizes the Context.  The Context must be re-initialized to
 // be used again.
 reset :: proc(ctx: ^Context) {
-	_sha3.reset(transmute(^_sha3.Context)(ctx))
+	_sha3.reset((^_sha3.Context)(ctx))
 }

+ 7 - 7
core/crypto/tuplehash/tuplehash.odin

@@ -13,19 +13,19 @@ Context :: distinct _sha3.Context
 
 // init_128 initializes a Context for TupleHash128 or TupleHashXOF128.
 init_128 :: proc(ctx: ^Context, domain_sep: []byte) {
-	_sha3.init_cshake(transmute(^_sha3.Context)(ctx), N_TUPLEHASH, domain_sep, 128)
+	_sha3.init_cshake((^_sha3.Context)(ctx), N_TUPLEHASH, domain_sep, 128)
 }
 
 // init_256 initializes a Context for TupleHash256 or TupleHashXOF256.
 init_256 :: proc(ctx: ^Context, domain_sep: []byte) {
-	_sha3.init_cshake(transmute(^_sha3.Context)(ctx), N_TUPLEHASH, domain_sep, 256)
+	_sha3.init_cshake((^_sha3.Context)(ctx), N_TUPLEHASH, domain_sep, 256)
 }
 
 // write_element writes a tuple element into the TupleHash or TupleHashXOF
 // instance.  This MUST not be called after any reads have been done, and
 // any attempts to do so will panic.
 write_element :: proc(ctx: ^Context, data: []byte) {
-	_, _ = _sha3.encode_string(transmute(^_sha3.Context)(ctx), data)
+	_, _ = _sha3.encode_string((^_sha3.Context)(ctx), data)
 }
 
 // final finalizes the Context, writes the digest to hash, and calls
@@ -34,7 +34,7 @@ write_element :: proc(ctx: ^Context, data: []byte) {
 // Iff finalize_clone is set, final will work on a copy of the Context,
 // which is useful for calculating rolling digests.
 final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
-	_sha3.final_cshake(transmute(^_sha3.Context)(ctx), hash, finalize_clone)
+	_sha3.final_cshake((^_sha3.Context)(ctx), hash, finalize_clone)
 }
 
 // read reads output from the TupleHashXOF instance.  There is no practical
@@ -42,7 +42,7 @@ final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
 // After read has been called one or more times, further calls to
 // write_element will panic.
 read :: proc(ctx: ^Context, dst: []byte) {
-	ctx_ := transmute(^_sha3.Context)(ctx)
+	ctx_ := (^_sha3.Context)(ctx)
 	if !ctx.is_finalized {
 		_sha3.encode_byte_len(ctx_, 0, false) // right_encode
 		_sha3.shake_xof(ctx_)
@@ -53,13 +53,13 @@ read :: proc(ctx: ^Context, dst: []byte) {
 
 // clone clones the Context other into ctx.
 clone :: proc(ctx, other: ^Context) {
-	_sha3.clone(transmute(^_sha3.Context)(ctx), transmute(^_sha3.Context)(other))
+	_sha3.clone((^_sha3.Context)(ctx), (^_sha3.Context)(other))
 }
 
 // reset sanitizes the Context.  The Context must be re-initialized to
 // be used again.
 reset :: proc(ctx: ^Context) {
-	_sha3.reset(transmute(^_sha3.Context)(ctx))
+	_sha3.reset((^_sha3.Context)(ctx))
 }
 
 @(private)

+ 2 - 4
core/image/qoi/qoi.odin

@@ -139,15 +139,13 @@ save_to_buffer  :: proc(output: ^bytes.Buffer, img: ^Image, options := Options{}
 					} else {
 						// Write RGB literal
 						output.buf[written] = u8(QOI_Opcode_Tag.RGB)
-						pix_bytes := transmute([4]u8)pix
-						copy(output.buf[written + 1:], pix_bytes[:3])
+						copy(output.buf[written + 1:], pix[:3])
 						written += 4
 					}
 				} else {
 					// Write RGBA literal
 					output.buf[written] = u8(QOI_Opcode_Tag.RGBA)
-					pix_bytes := transmute([4]u8)pix
-					copy(output.buf[written + 1:], pix_bytes[:])
+					copy(output.buf[written + 1:], pix[:])
 					written += 5
 				}
 			}

+ 2 - 2
core/os/os_darwin.odin

@@ -883,8 +883,8 @@ absolute_path_from_relative :: proc(rel: string) -> (path: string, err: Errno) {
 	}
 	defer _unix_free(path_ptr)
 
-	path_cstr := transmute(cstring)path_ptr
-	path = strings.clone( string(path_cstr) )
+	path_cstr := cast(cstring)path_ptr
+	path = strings.clone(string(path_cstr))
 
 	return path, ERROR_NONE
 }

+ 2 - 2
core/os/os_freebsd.odin

@@ -648,8 +648,8 @@ absolute_path_from_relative :: proc(rel: string) -> (path: string, err: Errno) {
 	}
 	defer _unix_free(path_ptr)
 
-	path_cstr := transmute(cstring)path_ptr
-	path = strings.clone( string(path_cstr) )
+
+	path = strings.clone(string(cstring(path_ptr)))
 
 	return path, ERROR_NONE
 }

+ 1 - 2
core/os/os_linux.odin

@@ -890,8 +890,7 @@ absolute_path_from_relative :: proc(rel: string) -> (path: string, err: Errno) {
 	}
 	defer _unix_free(path_ptr)
 
-	path_cstr := transmute(cstring)path_ptr
-	path = strings.clone( string(path_cstr) )
+	path = strings.clone(string(cstring(path_ptr)))
 
 	return path, ERROR_NONE
 }

+ 1 - 1
core/sys/info/cpu_intel.odin

@@ -117,7 +117,7 @@ init_cpu_name :: proc "c" () {
 		return
 	}
 
-	_buf := transmute(^[0x12]u32)&_cpu_name_buf
+	_buf := (^[0x12]u32)(&_cpu_name_buf)
 	_buf[ 0], _buf[ 1], _buf[ 2], _buf[ 3] = cpuid(0x8000_0002, 0)
 	_buf[ 4], _buf[ 5], _buf[ 6], _buf[ 7] = cpuid(0x8000_0003, 0)
 	_buf[ 8], _buf[ 9], _buf[10], _buf[11] = cpuid(0x8000_0004, 0)

+ 2 - 2
core/sys/linux/wrappers.odin

@@ -85,13 +85,13 @@ dirent_iterate_buf :: proc "contextless" (buf: []u8, offs: ^int) -> (d: ^Dirent,
 /// Obtain the name of dirent as a string
 /// The lifetime of the string is bound to the lifetime of the provided dirent structure
 dirent_name :: proc "contextless" (dirent: ^Dirent) -> string #no_bounds_check {
-	str := transmute([^]u8) &dirent.name
+	str := ([^]u8)(&dirent.name)
 	// Note(flysand): The string size calculated above applies only to the ideal case
 	// we subtract 1 byte from the string size, because a null terminator is guaranteed
 	// to be present. But! That said, the dirents are aligned to 8 bytes and the padding
 	// between the null terminator and the start of the next struct may be not initialized
 	// which means we also have to scan these garbage bytes.
-	str_size := (cast(int) dirent.reclen) - 1 - cast(int) offset_of(Dirent, name)
+	str_size := int(dirent.reclen) - 1 - cast(int)offset_of(Dirent, name)
 	// This skips *only* over the garbage, since if we're not garbage we're at nul terminator,
 	// which skips this loop
 	for str[str_size] != 0 {

+ 26 - 12
src/check_expr.cpp

@@ -3396,7 +3396,7 @@ gb_internal void check_cast(CheckerContext *c, Operand *x, Type *type, bool forb
 			) {
 				gbString oper_str = expr_to_string(x->expr);
 				gbString to_type  = type_to_string(dst_exact);
-				error(x->expr, "Unneeded cast of `%s` to identical type `%s`", oper_str, to_type);
+				error(x->expr, "Unneeded cast of '%s' to identical type '%s'", oper_str, to_type);
 				gb_string_free(oper_str);
 				gb_string_free(to_type);
 			}
@@ -3406,22 +3406,13 @@ gb_internal void check_cast(CheckerContext *c, Operand *x, Type *type, bool forb
 	x->type = type;
 }
 
-gb_internal bool check_transmute(CheckerContext *c, Ast *node, Operand *o, Type *t) {
+gb_internal bool check_transmute(CheckerContext *c, Ast *node, Operand *o, Type *t, bool forbid_identical = false) {
 	if (!is_operand_value(*o)) {
 		error(o->expr, "'transmute' can only be applied to values");
 		o->mode = Addressing_Invalid;
 		return false;
 	}
 
-	// if (o->mode == Addressing_Constant) {
-	// 	gbString expr_str = expr_to_string(o->expr);
-	// 	error(o->expr, "Cannot transmute a constant expression: '%s'", expr_str);
-	// 	gb_string_free(expr_str);
-	// 	o->mode = Addressing_Invalid;
-	// 	o->expr = node;
-	// 	return false;
-	// }
-
 	Type *src_t = o->type;
 	Type *dst_t = t;
 	Type *src_bt = base_type(src_t);
@@ -3504,6 +3495,29 @@ gb_internal bool check_transmute(CheckerContext *c, Ast *node, Operand *o, Type
 				return true;
 			}
 		}
+	} else {
+		// If we check polymorphic procedures, we risk erring on
+		// identical casts that cannot be foreseen or otherwise
+		// forbidden, so just skip them.
+		if (forbid_identical && check_vet_flags(c) & VetFlag_Cast &&
+		    (c->curr_proc_sig == nullptr || !is_type_polymorphic(c->curr_proc_sig))) {
+			bool is_runtime = false;
+			if (c->pkg && (c->pkg->kind == Package_Runtime || c->pkg->kind == Package_Builtin)) {
+				is_runtime = true;
+			}
+			if (are_types_identical(src_t, dst_t) && !is_runtime) {
+				gbString oper_str = expr_to_string(o->expr);
+				gbString to_type  = type_to_string(dst_t);
+				error(o->expr, "Unneeded transmute of '%s' to identical type '%s'", oper_str, to_type);
+				gb_string_free(oper_str);
+				gb_string_free(to_type);
+			} else if (is_type_internally_pointer_like(src_t) &&
+			           is_type_internally_pointer_like(dst_t)) {
+				gbString to_type  = type_to_string(dst_t);
+				error(o->expr, "Use of 'transmute' where 'cast' would be preferred since the types are pointer-like", to_type);
+				gb_string_free(to_type);
+			}
+		}
 	}
 
 	o->mode = Addressing_Value;
@@ -10734,7 +10748,7 @@ gb_internal ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast
 		if (o->mode != Addressing_Invalid) {
 			switch (tc->token.kind) {
 			case Token_transmute:
-				check_transmute(c, node, o, type);
+				check_transmute(c, node, o, type, true);
 				break;
 			case Token_cast:
 				check_cast(c, o, type, true);