Browse Source

Merge pull request #1256 from Yawning/feature/volatile-memset

Add a way to securely scrub memory
gingerBill 3 years ago
parent
commit
0bc3652fc7

+ 2 - 0
Makefile

@@ -23,6 +23,8 @@ ifeq ($(OS), Linux)
     LLVM_CONFIG=llvm-config-11
     ifneq ($(shell which llvm-config-11 2>/dev/null),)
         LLVM_CONFIG=llvm-config-11
+    else ifneq ($(shell which llvm-config-11-64 2>/dev/null),)
+        LLVM_CONFIG=llvm-config-11-64
     else
         ifneq ($(shell llvm-config --version | grep '^11\.'),)
             LLVM_CONFIG=llvm-config

+ 1 - 0
core/intrinsics/intrinsics.odin

@@ -39,6 +39,7 @@ sqrt :: proc(x: $T) -> T where type_is_float(T) ---
 mem_copy                 :: proc(dst, src: rawptr, len: int) ---
 mem_copy_non_overlapping :: proc(dst, src: rawptr, len: int) ---
 mem_zero                 :: proc(ptr: rawptr, len: int) ---
+mem_zero_volatile        :: proc(ptr: rawptr, len: int) ---
 
 
 fixed_point_mul     :: proc(lhs, rhs: $T, #const scale: uint) -> T where type_is_integer(T) ---

+ 9 - 0
core/mem/mem.odin

@@ -10,6 +10,15 @@ zero :: proc "contextless" (data: rawptr, len: int) -> rawptr {
 	intrinsics.mem_zero(data, len)
 	return data
 }
+zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr {
+	// This routine tries to avoid the compiler optimizing away the call,
+	// so that it is always executed.  It is intended to provide
+	// equivalent semantics to those provided by the C11 Annex K 3.7.4.1
+	// memset_s call.
+	intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero
+	intrinsics.atomic_fence() // Prevent reordering
+	return data
+}
 zero_item :: proc "contextless" (item: $P/^$T) {
 	intrinsics.mem_zero(item, size_of(T))
 }

+ 1 - 0
src/check_builtin.cpp

@@ -2598,6 +2598,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
 		break;
 
 	case BuiltinProc_mem_zero:
+	case BuiltinProc_mem_zero_volatile:
 		{
 			operand->mode = Addressing_NoValue;
 			operand->type = t_invalid;

+ 2 - 0
src/checker_builtin_procs.hpp

@@ -70,6 +70,7 @@ enum BuiltinProcId {
 	BuiltinProc_mem_copy,
 	BuiltinProc_mem_copy_non_overlapping,
 	BuiltinProc_mem_zero,
+	BuiltinProc_mem_zero_volatile,
 
 	BuiltinProc_ptr_offset,
 	BuiltinProc_ptr_sub,
@@ -322,6 +323,7 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
 	{STR_LIT("mem_copy"),                 3, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
 	{STR_LIT("mem_copy_non_overlapping"), 3, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
 	{STR_LIT("mem_zero"),                 2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
+	{STR_LIT("mem_zero_volatile"),        2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
 
 	{STR_LIT("ptr_offset"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
 	{STR_LIT("ptr_sub"),    2, false, Expr_Expr, BuiltinProcPkg_intrinsics},

+ 12 - 1
src/llvm_backend_proc.cpp

@@ -1560,7 +1560,18 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
 			len = lb_emit_conv(p, len, t_int);
 
 			unsigned alignment = 1;
-			lb_mem_zero_ptr_internal(p, ptr.value, len.value, alignment);
+			lb_mem_zero_ptr_internal(p, ptr.value, len.value, alignment, false);
+			return {};
+		}
+	case BuiltinProc_mem_zero_volatile:
+		{
+			lbValue ptr = lb_build_expr(p, ce->args[0]);
+			lbValue len = lb_build_expr(p, ce->args[1]);
+			ptr = lb_emit_conv(p, ptr, t_rawptr);
+			len = lb_emit_conv(p, len, t_int);
+
+			unsigned alignment = 1;
+			lb_mem_zero_ptr_internal(p, ptr.value, len.value, alignment, true);
 			return {};
 		}
 

+ 3 - 3
src/llvm_backend_utility.cpp

@@ -48,7 +48,7 @@ lbValue lb_correct_endianness(lbProcedure *p, lbValue value) {
 	return value;
 }
 
-void lb_mem_zero_ptr_internal(lbProcedure *p, LLVMValueRef ptr, LLVMValueRef len, unsigned alignment) {
+void lb_mem_zero_ptr_internal(lbProcedure *p, LLVMValueRef ptr, LLVMValueRef len, unsigned alignment, bool is_volatile) {
 	bool is_inlinable = false;
 
 	i64 const_len = 0;
@@ -77,7 +77,7 @@ void lb_mem_zero_ptr_internal(lbProcedure *p, LLVMValueRef ptr, LLVMValueRef len
 	args[0] = LLVMBuildPointerCast(p->builder, ptr, types[0], "");
 	args[1] = LLVMConstInt(LLVMInt8TypeInContext(p->module->ctx), 0, false);
 	args[2] = LLVMBuildIntCast2(p->builder, len, types[1], /*signed*/false, "");
-	args[3] = LLVMConstInt(LLVMInt1TypeInContext(p->module->ctx), 0, false); // is_volatile parameter
+	args[3] = LLVMConstInt(LLVMInt1TypeInContext(p->module->ctx), is_volatile, false);
 
 	LLVMBuildCall(p->builder, ip, args, gb_count_of(args), "");
 }
@@ -93,7 +93,7 @@ void lb_mem_zero_ptr(lbProcedure *p, LLVMValueRef ptr, Type *type, unsigned alig
 		{
 			// NOTE(bill): Enforce zeroing through memset to make sure padding is zeroed too
 			i32 sz = cast(i32)type_size_of(type);
-			lb_mem_zero_ptr_internal(p, ptr, lb_const_int(p->module, t_int, sz).value, alignment);
+			lb_mem_zero_ptr_internal(p, ptr, lb_const_int(p->module, t_int, sz).value, alignment, false);
 		}
 		break;
 	default: