
Merge pull request #5122 from Lperlind/asan-allocators

Add asan support for various allocators and stack unpoisoning
gingerBill 4 months ago
parent
commit
90a30a145a

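The pattern applied throughout this change is: poison a block's backing memory while it sits in the allocator's pool, and unpoison exactly the bytes handed back to the caller. A minimal sketch of that pattern with `base:sanitizer` (the `buf` backing buffer and `bump_alloc` helper are hypothetical, for illustration only):

package asan_sketch

import "base:sanitizer"

buf:  [4096]byte // hypothetical backing storage
used: int

init :: proc() {
	// Nothing has been handed out yet, so instrumented code must not touch the buffer.
	sanitizer.address_poison(buf[:])
}

bump_alloc :: proc(size: int) -> []byte {
	if used + size > len(buf) {
		return nil
	}
	data := buf[used:][:size]
	used += size
	// Make only the returned bytes addressable again.
	sanitizer.address_unpoison(data)
	return data
}

When asan is not enabled these calls compile to no-ops, so the same code path serves instrumented and plain builds.
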
+ 8 - 0
base/runtime/default_temp_allocator_arena.odin

@@ -1,6 +1,7 @@
 package runtime
 
 import "base:intrinsics"
+import "base:sanitizer"
 
 DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE :: uint(DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE)
 
@@ -43,6 +44,8 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint
 	block.base = ([^]byte)(uintptr(block) + base_offset)
 	block.capacity = uint(end - uintptr(block.base))
 
+	sanitizer.address_poison(block.base, block.capacity)
+
 	// Should be zeroed
 	assert(block.used == 0)
 	assert(block.prev == nil)
@@ -52,6 +55,7 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint
 memory_block_dealloc :: proc(block_to_free: ^Memory_Block, loc := #caller_location) {
 	if block_to_free != nil {
 		allocator := block_to_free.allocator
+		sanitizer.address_unpoison(block_to_free.base, block_to_free.capacity)
 		mem_free(block_to_free, allocator, loc)
 	}
 }
@@ -83,6 +87,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint)
 		return
 	}
 	data = block.base[block.used+alignment_offset:][:min_size]
+	sanitizer.address_unpoison(block.base[block.used:block.used+size])
 	block.used += size
 	return
 }
@@ -162,6 +167,7 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
 	if arena.curr_block != nil {
 		intrinsics.mem_zero(arena.curr_block.base, arena.curr_block.used)
 		arena.curr_block.used = 0
+		sanitizer.address_poison(arena.curr_block.base, arena.curr_block.capacity)
 	}
 	arena.total_used = 0
 }
@@ -226,6 +232,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
 					// grow data in-place, adjusting next allocation
 					block.used = uint(new_end)
 					data = block.base[start:new_end]
+					sanitizer.address_unpoison(data)
 					return
 				}
 			}
@@ -299,6 +306,7 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) {
 			assert(block.used >= temp.used, "out of order use of arena_temp_end", loc)
 			amount_to_zero := block.used-temp.used
 			intrinsics.mem_zero(block.base[temp.used:], amount_to_zero)
+			sanitizer.address_poison(block.base[temp.used:block.capacity])
 			block.used = temp.used
 			arena.total_used -= amount_to_zero
 		}

+ 10 - 2
base/runtime/heap_allocator_windows.odin

@@ -1,5 +1,7 @@
 package runtime
 
+import "../sanitizer"
+
 foreign import kernel32 "system:Kernel32.lib"
 
 @(private="file")
@@ -16,7 +18,10 @@ foreign kernel32 {
 
 _heap_alloc :: proc "contextless" (size: int, zero_memory := true) -> rawptr {
 	HEAP_ZERO_MEMORY :: 0x00000008
-	return HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY if zero_memory else 0, uint(size))
+	ptr := HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY if zero_memory else 0, uint(size))
+	// NOTE(lucas): asan is not guaranteed to unpoison the win32 heap out of the box, so do it ourselves
+	sanitizer.address_unpoison(ptr, size)
+	return ptr
 }
 _heap_resize :: proc "contextless" (ptr: rawptr, new_size: int) -> rawptr {
 	if new_size == 0 {
@@ -28,7 +33,10 @@ _heap_resize :: proc "contextless" (ptr: rawptr, new_size: int) -> rawptr {
 	}
 
 	HEAP_ZERO_MEMORY :: 0x00000008
-	return HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, ptr, uint(new_size))
+	new_ptr := HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, ptr, uint(new_size))
+	// NOTE(lucas): asan is not guaranteed to unpoison the win32 heap out of the box, so do it ourselves
+	sanitizer.address_unpoison(new_ptr, new_size)
+	return new_ptr
 }
 _heap_free :: proc "contextless" (ptr: rawptr) {
 	if ptr == nil {

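The same concern applies to any memory that reaches Odin from code asan did not instrument: unpoison it explicitly before instrumented code reads it. A hedged sketch of the general pattern, where `foreign_alloc` stands in for a hypothetical uninstrumented C-side allocator:

import "base:sanitizer"

wrap_foreign_alloc :: proc "contextless" (size: int) -> rawptr {
	ptr := foreign_alloc(size) // hypothetical uninstrumented allocator
	if ptr != nil {
		// asan knows nothing about this allocation; mark it addressable.
		sanitizer.address_unpoison(ptr, size)
	}
	return ptr
}
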
+ 7 - 0
base/runtime/internal.odin

@@ -1106,3 +1106,10 @@ __read_bits :: proc "contextless" (dst, src: [^]byte, offset: uintptr, size: uin
 		dst[j>>3]  |= the_bit<<(j&7)
 	}
 }
+
+when .Address in ODIN_SANITIZER_FLAGS {
+	foreign {
+		__asan_unpoison_memory_region :: proc "system" (address: rawptr, size: uint) ---
+	}
+}
+

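This declaration gives the compiler-emitted stack-local cleanup (see `src/llvm_backend_stmt.cpp` below) a runtime symbol to call. The same `ODIN_SANITIZER_FLAGS` built-in can gate asan-only checks in ordinary code; a sketch:

import "base:sanitizer"

checked_read :: proc(p: ^int) -> int {
	when .Address in ODIN_SANITIZER_FLAGS {
		// Extra sanity check that only exists in -sanitize:address builds.
		assert(sanitizer.address_region_is_poisoned(p) == nil)
	}
	return p^
}
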
+ 83 - 1
base/sanitizer/address.odin

@@ -60,6 +60,7 @@ poison or unpoison memory in the same memory region simultaneously.
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_poison_slice :: proc "contextless" (region: $T/[]$E) {
 	when ASAN_ENABLED {
 		__asan_poison_memory_region(raw_data(region), size_of(E) * len(region))
@@ -75,6 +76,7 @@ can poison or unpoison memory in the same memory region simultaneously.
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_unpoison_slice :: proc "contextless" (region: $T/[]$E) {
 	when ASAN_ENABLED {
 		__asan_unpoison_memory_region(raw_data(region), size_of(E) * len(region))
@@ -90,6 +92,7 @@ two threads can poison or unpoison memory in the same memory region simul
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_poison_ptr :: proc "contextless" (ptr: ^$T) {
 	when ASAN_ENABLED {
 		__asan_poison_memory_region(ptr, size_of(T))
@@ -106,6 +109,7 @@ region simultaneously.
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_unpoison_ptr :: proc "contextless" (ptr: ^$T) {
 	when ASAN_ENABLED {
 		__asan_unpoison_memory_region(ptr, size_of(T))
@@ -121,6 +125,7 @@ poison or unpoison memory in the same memory region simultaneously.
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_poison_rawptr :: proc "contextless" (ptr: rawptr, len: int) {
 	when ASAN_ENABLED {
 		assert_contextless(len >= 0)
@@ -128,6 +133,22 @@ address_poison_rawptr :: proc "contextless" (ptr: rawptr, len: int) {
 	}
 }
 
+/*
+Marks the region covering `[ptr, ptr+len)` as unaddressable
+
+Code instrumented with `-sanitize:address` is forbidden from accessing any address
+within the region. This procedure is not thread-safe because no two threads can
+poison or unpoison memory in the same memory region simultaneously.
+
+When asan is not enabled this procedure does nothing.
+*/
+@(no_sanitize_address)
+address_poison_rawptr_uint :: proc "contextless" (ptr: rawptr, len: uint) {
+	when ASAN_ENABLED {
+		__asan_poison_memory_region(ptr, len)
+	}
+}
+
 /*
 Marks the region covering `[ptr, ptr+len)` as addressable
 
@@ -137,6 +158,7 @@ threads can poison or unpoison memory in the same memory region simultane
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_unpoison_rawptr :: proc "contextless" (ptr: rawptr, len: int) {
 	when ASAN_ENABLED {
 		assert_contextless(len >= 0)
@@ -144,16 +166,34 @@ address_unpoison_rawptr :: proc "contextless" (ptr: rawptr, len: int) {
 	}
 }
 
+/*
+Marks the region covering `[ptr, ptr+len)` as addressable
+
+Code instrumented with `-sanitize:address` is allowed to access any address
+within the region again. This procedure is not thread-safe because no two
+threads can poison or unpoison memory in the same memory region simultaneously.
+
+When asan is not enabled this procedure does nothing.
+*/
+@(no_sanitize_address)
+address_unpoison_rawptr_uint :: proc "contextless" (ptr: rawptr, len: uint) {
+	when ASAN_ENABLED {
+		__asan_unpoison_memory_region(ptr, len)
+	}
+}
+
 address_poison :: proc {
 	address_poison_slice,
 	address_poison_ptr,
 	address_poison_rawptr,
+	address_poison_rawptr_uint,
 }
 
 address_unpoison :: proc {
 	address_unpoison_slice,
 	address_unpoison_ptr,
 	address_unpoison_rawptr,
+	address_unpoison_rawptr_uint,
 }
 
 /*
@@ -164,6 +204,7 @@ This can be used for logging and/or debugging purposes.
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_set_death_callback :: proc "contextless" (callback: Address_Death_Callback) {
 	when ASAN_ENABLED {
 		__sanitizer_set_death_callback(callback)
@@ -178,7 +219,8 @@ in an asan error.
 
 When asan is not enabled this procedure returns `nil`.
 */
-address_region_is_poisoned_slice :: proc "contextless" (region: []$T/$E) -> rawptr {
+@(no_sanitize_address)
+address_region_is_poisoned_slice :: proc "contextless" (region: $T/[]$E) -> rawptr {
 	when ASAN_ENABLED {
 		return __asan_region_is_poisoned(raw_data(region), size_of(E) * len(region))
 	} else {
@@ -194,6 +236,7 @@ in an asan error.
 
 When asan is not enabled this procedure returns `nil`.
 */
+@(no_sanitize_address)
 address_region_is_poisoned_ptr :: proc "contextless" (ptr: ^$T) -> rawptr {
 	when ASAN_ENABLED {
 		return __asan_region_is_poisoned(ptr, size_of(T))
@@ -210,6 +253,7 @@ in an asan error.
 
 When asan is not enabled this procedure returns `nil`.
 */
+@(no_sanitize_address)
 address_region_is_poisoned_rawptr :: proc "contextless" (region: rawptr, len: int) -> rawptr {
 	when ASAN_ENABLED {
 		assert_contextless(len >= 0)
@@ -219,10 +263,29 @@ address_region_is_poisoned_rawptr :: proc "contextless" (region: rawptr, len: in
 	}
 }
 
+/*
+Checks if the memory region covered by `[ptr, ptr+len)` is poisoned.
+
+If it is poisoned this procedure returns the address which would result
+in an asan error.
+
+When asan is not enabled this procedure returns `nil`.
+*/
+@(no_sanitize_address)
+address_region_is_poisoned_rawptr_uint :: proc "contextless" (region: rawptr, len: uint) -> rawptr {
+	when ASAN_ENABLED {
+		return __asan_region_is_poisoned(region, len)
+	} else {
+		return nil
+	}
+}
+
+
 address_region_is_poisoned :: proc {
 	address_region_is_poisoned_slice,
 	address_region_is_poisoned_ptr,
 	address_region_is_poisoned_rawptr,
+	address_region_is_poisoned_rawptr_uint,
 }
 
 /*
@@ -233,6 +296,7 @@ If it is poisoned this procedure returns `true`, otherwise it returns
 
 When asan is not enabled this procedure returns `false`.
 */
+@(no_sanitize_address)
 address_is_poisoned :: proc "contextless" (address: rawptr) -> bool {
 	when ASAN_ENABLED {
 		return __asan_address_is_poisoned(address) != 0
@@ -248,6 +312,7 @@ This procedure prints the description out to `stdout`.
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_describe_address :: proc "contextless" (address: rawptr) {
 	when ASAN_ENABLED {
 		__asan_describe_address(address)
@@ -260,6 +325,7 @@ Returns `true` if an asan error has occurred, otherwise it returns
 
 When asan is not enabled this procedure returns `false`.
 */
+@(no_sanitize_address)
 address_report_present :: proc "contextless" () -> bool {
 	when ASAN_ENABLED {
 		return __asan_report_present() != 0
@@ -275,6 +341,7 @@ If no asan error has occurred `nil` is returned.
 
 When asan is not enabled this procedure returns `nil`.
 */
+@(no_sanitize_address)
 address_get_report_pc :: proc "contextless" () -> rawptr {
 	when ASAN_ENABLED {
 		return __asan_get_report_pc()
@@ -290,6 +357,7 @@ If no asan error has occurred `nil` is returned.
 
 When asan is not enabled this procedure returns `nil`.
 */
+@(no_sanitize_address)
 address_get_report_bp :: proc "contextless" () -> rawptr {
 	when ASAN_ENABLED {
 		return __asan_get_report_bp()
@@ -305,6 +373,7 @@ If no asan error has occurred `nil` is returned.
 
 When asan is not enabled this procedure returns `nil`.
 */
+@(no_sanitize_address)
 address_get_report_sp :: proc "contextless" () -> rawptr {
 	when ASAN_ENABLED {
 		return __asan_get_report_sp()
@@ -320,6 +389,7 @@ If no asan error has occurred `nil` is returned.
 
 When asan is not enabled this procedure returns `nil`.
 */
+@(no_sanitize_address)
 address_get_report_address :: proc "contextless" () -> rawptr {
 	when ASAN_ENABLED {
 		return __asan_get_report_address()
@@ -335,6 +405,7 @@ If no asan error has occurred `.none` is returned.
 
 When asan is not enabled this procedure returns `.none`.
 */
+@(no_sanitize_address)
 address_get_report_access_type :: proc "contextless" () -> Address_Access_Type {
 	when ASAN_ENABLED {
 		if ! address_report_present() {
@@ -353,6 +424,7 @@ If no asan error has occurred `0` is returned.
 
 When asan is not enabled this procedure returns `0`.
 */
+@(no_sanitize_address)
 address_get_report_access_size :: proc "contextless" () -> uint {
 	when ASAN_ENABLED {
 		return __asan_get_report_access_size()
@@ -368,6 +440,7 @@ If no asan error has occurred an empty string is returned.
 
 When asan is not enabled this procedure returns an empty string.
 */
+@(no_sanitize_address)
 address_get_report_description :: proc "contextless" () -> string {
 	when ASAN_ENABLED {
 		return string(__asan_get_report_description())
@@ -386,6 +459,7 @@ The information provided include:
 
 When asan is not enabled this procedure returns zero initialised values.
 */
+@(no_sanitize_address)
 address_locate_address :: proc "contextless" (addr: rawptr, data: []byte) -> Address_Located_Address {
 	when ASAN_ENABLED {
 		out_addr: rawptr
@@ -404,6 +478,7 @@ The stack trace is filled into the `data` slice.
 
 When asan is not enabled this procedure returns a zero initialised value.
 */
+@(no_sanitize_address)
 address_get_alloc_stack_trace :: proc "contextless" (addr: rawptr, data: []rawptr) -> ([]rawptr, int) {
 	when ASAN_ENABLED {
 		out_thread: i32
@@ -421,6 +496,7 @@ The stack trace is filled into the `data` slice.
 
 When asan is not enabled this procedure returns zero initialised values.
 */
+@(no_sanitize_address)
 address_get_free_stack_trace :: proc "contextless" (addr: rawptr, data: []rawptr) -> ([]rawptr, int) {
 	when ASAN_ENABLED {
 		out_thread: i32
@@ -436,6 +512,7 @@ Returns the current asan shadow memory mapping.
 
 When asan is not enabled this procedure returns a zero initialised value.
 */
+@(no_sanitize_address)
 address_get_shadow_mapping :: proc "contextless" () -> Address_Shadow_Mapping {
 	when ASAN_ENABLED {
 		result: Address_Shadow_Mapping
@@ -451,6 +528,7 @@ Prints asan statistics to `stderr`
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_print_accumulated_stats :: proc "contextless" () {
 	when ASAN_ENABLED {
 		__asan_print_accumulated_stats()
@@ -464,6 +542,7 @@ This pointer can be then used for `address_is_in_fake_stack`.
 
 When asan is not enabled this procedure returns `nil`.
 */
+@(no_sanitize_address)
 address_get_current_fake_stack :: proc "contextless" () -> rawptr {
 	when ASAN_ENABLED {
 		return __asan_get_current_fake_stack()
@@ -477,6 +556,7 @@ Returns if an address belongs to a given fake stack and if so the region of the
 
 When asan is not enabled this procedure returns zero initialised values.
 */
+@(no_sanitize_address)
 address_is_in_fake_stack :: proc "contextless" (fake_stack: rawptr, addr: rawptr) -> ([]byte, bool) {
 	when ASAN_ENABLED {
 		begin: rawptr
@@ -496,6 +576,7 @@ i.e. a procedure such as `panic` and `os.exit`.
 
 When asan is not enabled this procedure does nothing.
 */
+@(no_sanitize_address)
 address_handle_no_return :: proc "contextless" () {
 	when ASAN_ENABLED {
 		__asan_handle_no_return()
@@ -509,6 +590,7 @@ Returns `true` if successful, otherwise it returns `false`.
 
 When asan is not enabled this procedure returns `false`.
 */
+@(no_sanitize_address)
 address_update_allocation_context :: proc "contextless" (addr: rawptr) -> bool {
 	when ASAN_ENABLED {
 		return __asan_update_allocation_context(addr) != 0

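Together with the query procedures, the new `_uint` overloads let runtime code pass `uint` sizes through without casts. A usage sketch (assuming `ASAN_ENABLED` is the package's exported constant):

package poison_query

import "base:sanitizer"

main :: proc() {
	buf: [64]byte
	p := rawptr(&buf[0])
	n := uint(len(buf))

	sanitizer.address_poison(p, n) // resolves to the new rawptr + uint overload
	when sanitizer.ASAN_ENABLED {
		assert(sanitizer.address_is_poisoned(p))
	}

	sanitizer.address_unpoison(p, n)
	// nil means no address in [p, p+n) would trip asan.
	assert(sanitizer.address_region_is_poisoned(p, n) == nil)
}
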
+ 3 - 1
base/sanitizer/doc.odin

@@ -14,12 +14,14 @@ related bugs. Typically asan interacts with libc but Odin code can be marked up
 with the asan runtime to extend the memory error detection outside of libc using this package.
 For more information about asan see: https://clang.llvm.org/docs/AddressSanitizer.html
 
+Procedures can be made exempt from asan instrumentation by marking them with `@(no_sanitize_address)`.
+
 ## Memory
 
 Enabled with `-sanitize:memory` when building an odin project.
 
 The memory sanitizer is another runtime memory error detector with the sole purpose to catch the
-use of uninitialized memory. This is not a very common bug in Odin as be default everything is
+use of uninitialized memory. This is not a very common bug in Odin as by default everything is
 set to zero when initialised (ZII).
 For more information about the memory sanitizer see: https://clang.llvm.org/docs/MemorySanitizer.html
 

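A sketch of the attribute in practice: allocator internals that deliberately walk poisoned metadata opt out of instrumentation so their own loads do not trip asan (the `Header` type here is hypothetical):

Header :: struct {
	is_free: bool,
}

@(no_sanitize_address)
first_free :: proc "contextless" (headers: []Header) -> int {
	// This walk touches poisoned free-list metadata on purpose;
	// asan must not intercept these loads.
	for h, i in headers {
		if h.is_free {
			return i
		}
	}
	return -1
}
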
+ 31 - 19
core/mem/rollback_stack_allocator.odin

@@ -1,6 +1,7 @@
 package mem
 
 import "base:runtime"
+import "base:sanitizer"
 
 /*
 Rollback stack default block size.
@@ -47,14 +48,14 @@ Rollback_Stack :: struct {
 	block_allocator: Allocator,
 }
 
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
 rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool {
 	start := raw_data(block.buffer)
 	end   := start[block.offset:]
 	return start < ptr && ptr <= end
 }
 
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
 rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
 	parent: ^Rollback_Stack_Block,
 	block:  ^Rollback_Stack_Block,
@@ -71,7 +72,7 @@ rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
 	return nil, nil, nil, .Invalid_Pointer
 }
 
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
 rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
 	block: ^Rollback_Stack_Block,
 	header: ^Rollback_Stack_Header,
@@ -86,9 +87,10 @@ rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
 	return nil, nil, false
 }
 
-@(private="file")
+@(private="file", no_sanitize_address)
 rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) {
 	header := header
+
 	for block.offset > 0 && header.is_free {
 		block.offset = header.prev_offset
 		block.last_alloc = raw_data(block.buffer)[header.prev_ptr:]
@@ -99,9 +101,10 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_
 /*
 Free memory to a rollback stack allocator.
 */
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
 rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
 	parent, block, header := rb_find_ptr(stack, ptr) or_return
+
 	if header.is_free {
 		return .Invalid_Pointer
 	}
@@ -120,7 +123,7 @@ rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
 /*
 Free all memory owned by the rollback stack allocator.
 */
-@(private="file")
+@(private="file", no_sanitize_address)
 rb_free_all :: proc(stack: ^Rollback_Stack) {
 	for block := stack.head.next_block; block != nil; /**/ {
 		next_block := block.next_block
@@ -131,12 +134,13 @@ rb_free_all :: proc(stack: ^Rollback_Stack) {
 	stack.head.next_block = nil
 	stack.head.last_alloc = nil
 	stack.head.offset = 0
+	sanitizer.address_poison(stack.head.buffer)
 }
 
 /*
 Allocate memory using the rollback stack allocator.
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 rb_alloc :: proc(
 	stack: ^Rollback_Stack,
 	size: int,
@@ -153,7 +157,7 @@ rb_alloc :: proc(
 /*
 Allocate memory using the rollback stack allocator.
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 rb_alloc_bytes :: proc(
 	stack: ^Rollback_Stack,
 	size: int,
@@ -170,7 +174,7 @@ rb_alloc_bytes :: proc(
 /*
 Allocate non-initialized memory using the rollback stack allocator.
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 rb_alloc_non_zeroed :: proc(
 	stack: ^Rollback_Stack,
 	size: int,
@@ -184,7 +188,7 @@ rb_alloc_non_zeroed :: proc(
 /*
 Allocate non-initialized memory using the rollback stack allocator.
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 rb_alloc_bytes_non_zeroed :: proc(
 	stack: ^Rollback_Stack,
 	size: int,
@@ -194,6 +198,7 @@ rb_alloc_bytes_non_zeroed :: proc(
 	assert(size >= 0, "Size must be positive or zero.", loc)
 	assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc)
 	parent: ^Rollback_Stack_Block
+
 	for block := stack.head; /**/; block = block.next_block {
 		when !ODIN_DISABLE_ASSERT {
 			allocated_new_block: bool
@@ -235,7 +240,9 @@ rb_alloc_bytes_non_zeroed :: proc(
 			// Prevent any further allocations on it.
 			block.offset = cast(uintptr)len(block.buffer)
 		}
-		#no_bounds_check return ptr[:size], nil
+		res := ptr[:size]
+		sanitizer.address_unpoison(res)
+		return res, nil
 	}
 	return nil, .Out_Of_Memory
 }
@@ -243,7 +250,7 @@ rb_alloc_bytes_non_zeroed :: proc(
 /*
 Resize an allocation owned by rollback stack allocator.
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 rb_resize :: proc(
 	stack: ^Rollback_Stack,
 	old_ptr: rawptr,
@@ -266,7 +273,7 @@ rb_resize :: proc(
 /*
 Resize an allocation owned by rollback stack allocator.
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 rb_resize_bytes :: proc(
 	stack: ^Rollback_Stack,
 	old_memory: []byte,
@@ -289,7 +296,7 @@ rb_resize_bytes :: proc(
 Resize an allocation owned by rollback stack allocator without explicit
 zero-initialization.
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 rb_resize_non_zeroed :: proc(
 	stack: ^Rollback_Stack,
 	old_ptr: rawptr,
@@ -306,7 +313,7 @@ rb_resize_non_zeroed :: proc(
 Resize an allocation owned by rollback stack allocator without explicit
 zero-initialization.
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 rb_resize_bytes_non_zeroed :: proc(
 	stack: ^Rollback_Stack,
 	old_memory: []byte,
@@ -330,7 +337,9 @@ rb_resize_bytes_non_zeroed :: proc(
 				if len(block.buffer) <= stack.block_size {
 					block.offset += cast(uintptr)size - cast(uintptr)old_size
 				}
-				#no_bounds_check return (ptr)[:size], nil
+				res := (ptr)[:size]
+				sanitizer.address_unpoison(res)
+				#no_bounds_check return res, nil
 			}
 		}
 	}
@@ -340,7 +349,7 @@ rb_resize_bytes_non_zeroed :: proc(
 	return
 }
 
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
 rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
 	buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
 	block = cast(^Rollback_Stack_Block)raw_data(buffer)
@@ -351,6 +360,7 @@ rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stac
 /*
 Initialize the rollback stack allocator using a fixed backing buffer.
 */
+@(no_sanitize_address)
 rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
 	MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
 	assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
@@ -365,6 +375,7 @@ rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, loc
 /*
 Initialize the rollback stack allocator using a backing block allocator.
 */
+@(no_sanitize_address)
 rollback_stack_init_dynamic :: proc(
 	stack: ^Rollback_Stack,
 	block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
@@ -396,6 +407,7 @@ rollback_stack_init :: proc {
 /*
 Destroy a rollback stack.
 */
+@(no_sanitize_address)
 rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
 	if stack.block_allocator.procedure != nil {
 		rb_free_all(stack)
@@ -435,7 +447,7 @@ from the last allocation backwards.
 Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
 the requested alignment.
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
 	return Allocator {
 		data = stack,
@@ -443,7 +455,7 @@ rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
 	}
 }
 
-@(require_results)
+@(require_results, no_sanitize_address)
 rollback_stack_allocator_proc :: proc(
 	allocator_data: rawptr,
 	mode: Allocator_Mode,

+ 1 - 1
core/mem/tlsf/tlsf.odin

@@ -198,4 +198,4 @@ fls :: proc "contextless" (word: u32) -> (bit: i32) {
 fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
 	N :: (size_of(uint) * 8) - 1
 	return i32(N - intrinsics.count_leading_zeros(size))
-}
+}

+ 52 - 44
core/mem/tlsf/tlsf_internal.odin

@@ -10,6 +10,7 @@
 package mem_tlsf
 
 import "base:intrinsics"
+import "base:sanitizer"
 import "base:runtime"
 
 // log2 of number of linear subdivisions of block sizes.
@@ -209,6 +210,8 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) ->
 				return nil, .Out_Of_Memory
 			}
 
+			sanitizer.address_poison(new_pool_buf)
+
 			// Allocate a new link in the `control.pool` tracking structure.
 			new_pool := new_clone(Pool{
 				data      = new_pool_buf,
@@ -254,7 +257,7 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) ->
 	return block_prepare_used(control, block, adjust)
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
 	res, err = alloc_bytes_non_zeroed(control, size, align)
 	if err == nil {
@@ -273,6 +276,7 @@ free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
 
 	block := block_from_ptr(ptr)
 	assert(!block_is_free(block), "block already marked as free") // double free
+	sanitizer.address_poison(ptr, block.size)
 	block_mark_as_free(block)
 	block = block_merge_prev(control, block)
 	block = block_merge_next(control, block)
@@ -316,6 +320,7 @@ resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, align
 
 	block_trim_used(control, block, adjust)
 	res = ([^]byte)(ptr)[:new_size]
+	sanitizer.address_unpoison(res)
 
 	if min_size < new_size {
 		to_zero := ([^]byte)(ptr)[min_size:new_size]
@@ -374,95 +379,96 @@ resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size:
 	NOTE: TLSF spec relies on ffs/fls returning a value in the range 0..31.
 */
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) {
 	return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)
 }
 
-@(private)
+@(private, no_sanitize_address)
 block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) {
 	old_size := block.size
 	block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE))
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) {
 	return block_size(block) == 0
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) {
 	return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE
 }
 
-@(private)
+@(private, no_sanitize_address)
 block_set_free :: proc "contextless" (block: ^Block_Header) {
 	block.size |= BLOCK_HEADER_FREE
 }
 
-@(private)
+@(private, no_sanitize_address)
 block_set_used :: proc "contextless" (block: ^Block_Header) {
 	block.size &~= BLOCK_HEADER_FREE
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) {
 	return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE
 }
 
-@(private)
+@(private, no_sanitize_address)
 block_set_prev_free :: proc "contextless" (block: ^Block_Header) {
 	block.size |= BLOCK_HEADER_PREV_FREE
 }
 
-@(private)
+@(private, no_sanitize_address)
 block_set_prev_used :: proc "contextless" (block: ^Block_Header) {
 	block.size &~= BLOCK_HEADER_PREV_FREE
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) {
 	return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET)
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_to_ptr   :: proc(block: ^Block_Header) -> (ptr: rawptr) {
 	return rawptr(uintptr(block) + BLOCK_START_OFFSET)
 }
 
 // Return location of next block after block of given size.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
 	return (^Block_Header)(uintptr(ptr) + uintptr(size))
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
 	return (^Block_Header)(uintptr(ptr) - uintptr(size))
 }
 
 // Return location of previous block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) {
 	assert(block_is_prev_free(block), "previous block must be free")
+
 	return block.prev_phys_block
 }
 
 // Return location of next existing block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
 	return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD)
 }
 
 // Link a new block with its physical neighbor, return the neighbor.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
 	next = block_next(block)
 	next.prev_phys_block = block
 	return
 }
 
-@(private)
+@(private, no_sanitize_address)
 block_mark_as_free :: proc(block: ^Block_Header) {
 	// Link the block to the next block, first.
 	next := block_link_next(block)
@@ -470,26 +476,26 @@ block_mark_as_free :: proc(block: ^Block_Header) {
 	block_set_free(block)
 }
 
-@(private)
-block_mark_as_used :: proc(block: ^Block_Header) {
+@(private, no_sanitize_address)
+block_mark_as_used :: proc(block: ^Block_Header) {
 	next := block_next(block)
 	block_set_prev_used(next)
 	block_set_used(block)
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 align_up :: proc(x, align: uint) -> (aligned: uint) {
 	assert(0 == (align & (align - 1)), "must align to a power of two")
 	return (x + (align - 1)) &~ (align - 1)
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 align_down :: proc(x, align: uint) -> (aligned: uint) {
 	assert(0 == (align & (align - 1)), "must align to a power of two")
 	return x - (x & (align - 1))
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
 	assert(0 == (align & (align - 1)), "must align to a power of two")
 	align_mask := uintptr(align) - 1
@@ -499,7 +505,7 @@ align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
 }
 
 // Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
 	if size == 0 {
 		return 0
@@ -513,7 +519,7 @@ adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
 }
 
 // Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) {
 	if size == 0 {
 		return 0, nil
@@ -531,7 +537,7 @@ adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err:
 // TLSF utility functions. In most cases these are direct translations of
 // the documentation in the research paper.
 
-@(optimization_mode="favor_size", private, require_results)
+@(optimization_mode="favor_size", private, require_results, no_sanitize_address)
 mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
 	if size < SMALL_BLOCK_SIZE {
 		// Store small blocks in first list.
@@ -544,7 +550,7 @@ mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
 	return
 }
 
-@(optimization_mode="favor_size", private, require_results)
+@(optimization_mode="favor_size", private, require_results, no_sanitize_address)
 mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
 	rounded = size
 	if size >= SMALL_BLOCK_SIZE {
@@ -555,12 +561,12 @@ mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
 }
 
 // This version rounds up to the next block size (for allocations)
-@(optimization_mode="favor_size", private, require_results)
+@(optimization_mode="favor_size", private, require_results, no_sanitize_address)
 mapping_search :: proc(size: uint) -> (fl, sl: i32) {
 	return mapping_insert(mapping_round(size))
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) {
 	// First, search for a block in the list associated with the given fl/sl index.
 	fl := fli^; sl := sli^
@@ -587,7 +593,7 @@ search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^B
 }
 
 // Remove a free block from the free list.
-@(private)
+@(private, no_sanitize_address)
 remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
 	prev := block.prev_free
 	next := block.next_free
@@ -613,7 +619,7 @@ remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl
 }
 
 // Insert a free block into the free block list.
-@(private)
+@(private, no_sanitize_address)
 insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
 	current := control.blocks[fl][sl]
 	assert(current != nil, "free lists cannot have a nil entry")
@@ -631,26 +637,26 @@ insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl
 }
 
 // Remove a given block from the free list.
-@(private)
+@(private, no_sanitize_address)
 block_remove :: proc(control: ^Allocator, block: ^Block_Header) {
 	fl, sl := mapping_insert(block_size(block))
 	remove_free_block(control, block, fl, sl)
 }
 
 // Insert a given block into the free list.
-@(private)
+@(private, no_sanitize_address)
 block_insert :: proc(control: ^Allocator, block: ^Block_Header) {
 	fl, sl := mapping_insert(block_size(block))
 	insert_free_block(control, block, fl, sl)
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) {
 	return block_size(block) >= size_of(Block_Header) + size
 }
 
 // Split a block into two, the second of which is free.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
 	// Calculate the amount of space left in the remaining block.
 	remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD)
@@ -671,9 +677,10 @@ block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Head
 }
 
 // Absorb a free block's storage into an adjacent previous free block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) {
 	assert(!block_is_last(prev), "previous block can't be last")
+
 	// Note: Leaves flags untouched.
 	prev.size += block_size(block) + BLOCK_HEADER_OVERHEAD
 	_ = block_link_next(prev)
@@ -681,7 +688,7 @@ block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^B
 }
 
 // Merge a just-freed block with an adjacent previous free block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
 	merged = block
 	if (block_is_prev_free(block)) {
@@ -695,7 +702,7 @@ block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged:
 }
 
 // Merge a just-freed block with an adjacent free block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
 	merged = block
 	next  := block_next(block)
@@ -710,7 +717,7 @@ block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged:
 }
 
 // Trim any trailing block space off the end of a free block, return to pool.
-@(private)
+@(private, no_sanitize_address)
 block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
 	assert(block_is_free(block), "block must be free")
 	if (block_can_split(block, size)) {
@@ -722,7 +729,7 @@ block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
 }
 
 // Trim any trailing block space off the end of a used block, return to pool.
-@(private)
+@(private, no_sanitize_address)
 block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
 	assert(!block_is_free(block), "Block must be used")
 	if (block_can_split(block, size)) {
@@ -736,7 +743,7 @@ block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
 }
 
 // Trim leading block space, return to pool.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
 	remaining = block
 	if block_can_split(block, size) {
@@ -750,7 +757,7 @@ block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size:
 	return remaining
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) {
 	fl, sl: i32
 	if size != 0 {
@@ -774,13 +781,14 @@ block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Hea
 	return block
 }
 
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
 block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) {
 	if block != nil {
 		assert(size != 0, "Size must be non-zero")
 		block_trim_free(control, block, size)
 		block_mark_as_used(block)
 		res = ([^]byte)(block_to_ptr(block))[:size]
+		sanitizer.address_unpoison(res)
 	}
 	return
-}
+}

+ 10 - 1
core/mem/tracking_allocator.odin

@@ -64,6 +64,7 @@ This procedure initializes the tracking allocator `t` with a backing allocator
 specified with `backing_allocator`. The `internals_allocator` will be used to
 allocate the tracked data.
 */
+@(no_sanitize_address)
 tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
 	t.backing = backing_allocator
 	t.allocation_map.allocator = internals_allocator
@@ -77,6 +78,7 @@ tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Alloc
 /*
 Destroy the tracking allocator.
 */
+@(no_sanitize_address)
 tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
 	delete(t.allocation_map)
 	delete(t.bad_free_array)
@@ -90,6 +92,7 @@ This procedure clears the tracked data from a tracking allocator.
 **Note**: This procedure clears only the current allocation data while keeping
 the totals intact.
 */
+@(no_sanitize_address)
 tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
 	sync.mutex_lock(&t.mutex)
 	clear(&t.allocation_map)
@@ -103,6 +106,7 @@ Reset the tracking allocator.
 
 Reset all of a Tracking Allocator's allocation data back to zero.
 */
+@(no_sanitize_address)
 tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
 	sync.mutex_lock(&t.mutex)
 	clear(&t.allocation_map)
@@ -124,6 +128,7 @@ Override Tracking_Allocator.bad_free_callback to have something else happen. For
 example, you can use tracking_allocator_bad_free_callback_add_to_array to return
 the tracking allocator to the old behavior, where the bad_free_array was used.
 */
+@(no_sanitize_address)
 tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) {
 	runtime.print_caller_location(location)
 	runtime.print_string(" Tracking allocator error: Bad free of pointer ")
@@ -136,6 +141,7 @@ tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memor
 Alternative behavior for a bad free: Store in `bad_free_array`. If you use this,
 then you must make sure to check Tracking_Allocator.bad_free_array at some point.
 */
+@(no_sanitize_address)
 tracking_allocator_bad_free_callback_add_to_array :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) {
 	append(&t.bad_free_array, Tracking_Allocator_Bad_Free_Entry {
 		memory = memory,
@@ -175,7 +181,7 @@ Example:
 		}
 	}
 */
-@(require_results)
+@(require_results, no_sanitize_address)
 tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
 	return Allocator{
 		data = data,
@@ -183,6 +189,7 @@ tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
 	}
 }
 
+@(no_sanitize_address)
 tracking_allocator_proc :: proc(
 	allocator_data: rawptr,
 	mode: Allocator_Mode,
@@ -191,6 +198,7 @@ tracking_allocator_proc :: proc(
 	old_size: int,
 	loc := #caller_location,
 ) -> (result: []byte, err: Allocator_Error) {
+	@(no_sanitize_address)
 	track_alloc :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) {
 		data.total_memory_allocated += i64(entry.size)
 		data.total_allocation_count += 1
@@ -200,6 +208,7 @@ tracking_allocator_proc :: proc(
 		}
 	}
 
+	@(no_sanitize_address)
 	track_free :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) {
 		data.total_memory_freed += i64(entry.size)
 		data.total_free_count += 1

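For reference, restoring the array-collecting behavior described in the doc comments above is a single assignment; a sketch, assuming this setup:

import "core:mem"

setup_tracking :: proc() {
	track: mem.Tracking_Allocator // must outlive the allocator's use in real code
	mem.tracking_allocator_init(&track, context.allocator)

	// Collect bad frees instead of panicking; inspect track.bad_free_array later.
	track.bad_free_callback = mem.tracking_allocator_bad_free_callback_add_to_array
	context.allocator = mem.tracking_allocator(&track)
}
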
+ 32 - 11
core/mem/virtual/arena.odin

@@ -3,6 +3,8 @@ package mem_virtual
 import "core:mem"
 import "core:sync"
 
+import "base:sanitizer"
+
 Arena_Kind :: enum uint {
 	Growing = 0, // Chained memory blocks (singly linked list).
 	Static  = 1, // Fixed reservation sized.
@@ -43,7 +45,7 @@ DEFAULT_ARENA_STATIC_RESERVE_SIZE :: mem.Gigabyte when size_of(uintptr) == 8 els
 
 // Initialization of an `Arena` to be a `.Growing` variant.
 // A growing arena is a linked list of `Memory_Block`s allocated with virtual memory.
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_init_growing :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (err: Allocator_Error) {
 	arena.kind           = .Growing
 	arena.curr_block     = memory_block_alloc(0, reserved, {}) or_return
@@ -53,24 +55,26 @@ arena_init_growing :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_GROWING
 	if arena.minimum_block_size == 0 {
 		arena.minimum_block_size = reserved
 	}
+	sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
 	return
 }
 
 
 // Initialization of an `Arena` to be a `.Static` variant.
 // A static arena contains a single `Memory_Block` allocated with virtual memory.
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_init_static :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_STATIC_RESERVE_SIZE, commit_size: uint = DEFAULT_ARENA_STATIC_COMMIT_SIZE) -> (err: Allocator_Error) {
 	arena.kind           = .Static
 	arena.curr_block     = memory_block_alloc(commit_size, reserved, {}) or_return
 	arena.total_used     = 0
 	arena.total_reserved = arena.curr_block.reserved
+	sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
 	return
 }
 
 // Initialization of an `Arena` to be a `.Buffer` variant.
 // A buffer arena contains a single `Memory_Block` created from a user-provided []byte.
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Error) {
 	if len(buffer) < size_of(Memory_Block) {
 		return .Out_Of_Memory
@@ -78,7 +82,7 @@ arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Erro
 
 	arena.kind = .Buffer
 
-	mem.zero_slice(buffer)
+	sanitizer.address_poison(buffer[:])
 
 	block_base := raw_data(buffer)
 	block := (^Memory_Block)(block_base)
@@ -94,7 +98,7 @@ arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Erro
 }
 
 // Allocates memory from the provided arena.
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_location) -> (data: []byte, err: Allocator_Error) {
 	assert(alignment & (alignment-1) == 0, "non-power of two alignment", loc)
 
@@ -158,10 +162,13 @@ arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_l
 		data, err = alloc_from_memory_block(arena.curr_block, size, alignment, default_commit_size=0)
 		arena.total_used = arena.curr_block.used
 	}
+
+	sanitizer.address_unpoison(data)
 	return
 }
 
 // Resets the memory of a Static or Buffer arena to a specific `position` (offset) and zeroes the previously used memory.
+@(no_sanitize_address)
 arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location) -> bool {
 	sync.mutex_guard(&arena.mutex)
 
@@ -175,6 +182,7 @@ arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location)
 			mem.zero_slice(arena.curr_block.base[arena.curr_block.used:][:prev_pos-pos])
 		}
 		arena.total_used = arena.curr_block.used
+		sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
 		return true
 	} else if pos == 0 {
 		arena.total_used = 0
@@ -184,6 +192,7 @@ arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location)
 }
 
 // Frees the last memory block of a Growing Arena
+@(no_sanitize_address)
 arena_growing_free_last_memory_block :: proc(arena: ^Arena, loc := #caller_location) {
 	if free_block := arena.curr_block; free_block != nil {
 		assert(arena.kind == .Growing, "expected a .Growing arena", loc)
@@ -191,11 +200,13 @@ arena_growing_free_last_memory_block :: proc(arena: ^Arena, loc := #caller_locat
 		arena.total_reserved -= free_block.reserved
 
 		arena.curr_block = free_block.prev
+		sanitizer.address_poison(free_block.base[:free_block.committed])
 		memory_block_dealloc(free_block)
 	}
 }
 
 // Deallocates all but the first memory block of the arena and resets the allocator's usage to 0.
+@(no_sanitize_address)
 arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
 	switch arena.kind {
 	case .Growing:
@@ -208,7 +219,9 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
 		if arena.curr_block != nil {
 			curr_block_used := int(arena.curr_block.used)
 			arena.curr_block.used = 0
+			sanitizer.address_unpoison(arena.curr_block.base[:curr_block_used])
 			mem.zero(arena.curr_block.base, curr_block_used)
+			sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
 		}
 		arena.total_used = 0
 	case .Static, .Buffer:
@@ -219,6 +232,7 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
 
 // Frees all of the memory allocated by the arena and zeros all of the values of an arena.
 // A buffer-based arena does not `delete` the provided `[]byte` buffer.
+@(no_sanitize_address)
 arena_destroy :: proc(arena: ^Arena, loc := #caller_location) {
 	sync.mutex_guard(&arena.mutex)
 	switch arena.kind {
@@ -250,7 +264,7 @@ arena_static_bootstrap_new :: proc{
 }
 
 // Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy.
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_growing_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintptr, minimum_block_size: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (ptr: ^T, err: Allocator_Error) {
 	bootstrap: Arena
 	bootstrap.kind = .Growing
@@ -266,13 +280,13 @@ arena_growing_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintp
 }
 
 // Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy.
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_growing_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, minimum_block_size: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (ptr: ^T, err: Allocator_Error) {
 	return arena_growing_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), minimum_block_size)
 }
 
 // Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy.
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintptr, reserved: uint) -> (ptr: ^T, err: Allocator_Error) {
 	bootstrap: Arena
 	bootstrap.kind = .Static
@@ -288,19 +302,20 @@ arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintpt
 }
 
 // Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy.
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_static_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, reserved: uint) -> (ptr: ^T, err: Allocator_Error) {
 	return arena_static_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), reserved)
 }
 
 
 // Create an `Allocator` from the provided `Arena`
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_allocator :: proc(arena: ^Arena) -> mem.Allocator {
 	return mem.Allocator{arena_allocator_proc, arena}
 }
 
 // The allocator procedure used by an `Allocator` produced by `arena_allocator`
+@(no_sanitize_address)
 arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
                              size, alignment: int,
                              old_memory: rawptr, old_size: int,
@@ -334,6 +349,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
 			if size < old_size {
 				// shrink data in-place
 				data = old_data[:size]
+				sanitizer.address_poison(old_data[size:old_size])
 				return
 			}
 
@@ -347,6 +363,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
 					_ = alloc_from_memory_block(block, new_end - old_end, 1, default_commit_size=arena.default_commit_size) or_return
 					arena.total_used += block.used - prev_used
 					data = block.base[start:new_end]
+					sanitizer.address_unpoison(data)
 					return
 				}
 			}
@@ -357,6 +374,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
 			return
 		}
 		copy(new_memory, old_data[:old_size])
+		sanitizer.address_poison(old_data[:old_size])
 		return new_memory, nil
 	case .Query_Features:
 		set := (^mem.Allocator_Mode_Set)(old_memory)
@@ -382,7 +400,7 @@ Arena_Temp :: struct {
 }
 
 // Begins the section of temporary arena memory.
-@(require_results)
+@(require_results, no_sanitize_address)
 arena_temp_begin :: proc(arena: ^Arena, loc := #caller_location) -> (temp: Arena_Temp) {
 	assert(arena != nil, "nil arena", loc)
 	sync.mutex_guard(&arena.mutex)
@@ -397,6 +415,7 @@ arena_temp_begin :: proc(arena: ^Arena, loc := #caller_location) -> (temp: Arena
 }
 
 // Ends the section of temporary arena memory by resetting the memory to the stored position.
+@(no_sanitize_address)
 arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) {
 	assert(temp.arena != nil, "nil arena", loc)
 	arena := temp.arena
@@ -432,6 +451,7 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) {
 }
 
 // Ignore the use of a `arena_temp_begin` entirely by __not__ resetting to the stored position.
+@(no_sanitize_address)
 arena_temp_ignore :: proc(temp: Arena_Temp, loc := #caller_location) {
 	assert(temp.arena != nil, "nil arena", loc)
 	arena := temp.arena
@@ -442,6 +462,7 @@ arena_temp_ignore :: proc(temp: Arena_Temp, loc := #caller_location) {
 }
 
 // Asserts that every `Arena_Temp` taken from the `Arena` has been ended
+@(no_sanitize_address)
 arena_check_temp :: proc(arena: ^Arena, loc := #caller_location) {
 	assert(arena.temp_count == 0, "Arena_Temp not been ended", loc)
 }

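With these hooks in place, everything a temp section rolls back is poisoned again, so a stale pointer into it is reported by asan instead of silently reading zeroed memory. A usage sketch:

package temp_arena_sketch

import virtual "core:mem/virtual"

main :: proc() {
	arena: virtual.Arena
	_ = virtual.arena_init_growing(&arena)
	defer virtual.arena_destroy(&arena)

	temp := virtual.arena_temp_begin(&arena)
	scratch, _ := virtual.arena_alloc(&arena, 128, 16)
	virtual.arena_temp_end(temp)
	// Under -sanitize:address, reading `scratch` here reports a use of poisoned memory.
	_ = scratch
}
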
+ 17 - 8
core/mem/virtual/virtual.odin

@@ -2,6 +2,7 @@ package mem_virtual
 
 import "core:mem"
 import "base:intrinsics"
+import "base:sanitizer"
 import "base:runtime"
 _ :: runtime
 
@@ -14,27 +15,33 @@ platform_memory_init :: proc() {
 
 Allocator_Error :: mem.Allocator_Error
 
-@(require_results)
+@(require_results, no_sanitize_address)
 reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) {
 	return _reserve(size)
 }
 
+@(no_sanitize_address)
 commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error {
+	sanitizer.address_unpoison(data, size)
 	return _commit(data, size)
 }
 
-@(require_results)
+@(require_results, no_sanitize_address)
 reserve_and_commit :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) {
 	data = reserve(size) or_return
 	commit(raw_data(data), size) or_return
 	return
 }
 
+@(no_sanitize_address)
 decommit :: proc "contextless" (data: rawptr, size: uint) {
+	sanitizer.address_poison(data, size)
 	_decommit(data, size)
 }
 
+@(no_sanitize_address)
 release :: proc "contextless" (data: rawptr, size: uint) {
+	sanitizer.address_unpoison(data, size)
 	_release(data, size)
 }
 
@@ -46,13 +53,11 @@ Protect_Flag :: enum u32 {
 Protect_Flags :: distinct bit_set[Protect_Flag; u32]
 Protect_No_Access :: Protect_Flags{}
 
+@(no_sanitize_address)
 protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) -> bool {
 	return _protect(data, size, flags)
 }
 
-
-
-
 Memory_Block :: struct {
 	prev: ^Memory_Block,
 	base:      [^]byte,
@@ -66,13 +71,13 @@ Memory_Block_Flag :: enum u32 {
 Memory_Block_Flags :: distinct bit_set[Memory_Block_Flag; u32]
 
 
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
 align_formula :: #force_inline proc "contextless" (size, align: uint) -> uint {
 	result := size + align-1
 	return result - result%align
 }
 
-@(require_results)
+@(require_results, no_sanitize_address)
 memory_block_alloc :: proc(committed, reserved: uint, alignment: uint = 0, flags: Memory_Block_Flags = {}) -> (block: ^Memory_Block, err: Allocator_Error) {
 	page_size := DEFAULT_PAGE_SIZE
 	assert(mem.is_power_of_two(uintptr(page_size)))
@@ -116,8 +121,9 @@ memory_block_alloc :: proc(committed, reserved: uint, alignment: uint = 0, flags
 	return &pmblock.block, nil
 }
 
-@(require_results)
+@(require_results, no_sanitize_address)
 alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint, default_commit_size: uint = 0) -> (data: []byte, err: Allocator_Error) {
+	@(no_sanitize_address)
 	calc_alignment_offset :: proc "contextless" (block: ^Memory_Block, alignment: uintptr) -> uint {
 		alignment_offset := uint(0)
 		ptr := uintptr(block.base[block.used:])
@@ -128,6 +134,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint,
 		return alignment_offset
 		
 	}
+	@(no_sanitize_address)
 	do_commit_if_necessary :: proc(block: ^Memory_Block, size: uint, default_commit_size: uint) -> (err: Allocator_Error) {
 		if block.committed - block.used < size {
 			pmblock := (^Platform_Memory_Block)(block)
@@ -172,10 +179,12 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint,
 
 	data = block.base[block.used+alignment_offset:][:min_size]
 	block.used += size
+	sanitizer.address_unpoison(data)
 	return
 }
 
 
+@(no_sanitize_address)
 memory_block_dealloc :: proc(block_to_free: ^Memory_Block) {
 	if block := (^Platform_Memory_Block)(block_to_free); block != nil {
 		platform_memory_free(block)

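These hooks keep asan's shadow state in sync with the page state: `commit` unpoisons what becomes usable and `decommit` re-poisons it, so a use-after-decommit is caught by asan before the OS faults. A sketch:

package virtual_sketch

import virtual "core:mem/virtual"

main :: proc() {
	pages, err := virtual.reserve(1 << 20)
	if err != nil {
		return
	}
	_ = virtual.commit(raw_data(pages), 4096) // commits and unpoisons the first page
	pages[0] = 1                              // fine: committed and addressable
	virtual.decommit(raw_data(pages), 4096)   // poisoned again; asan flags any further touch
	virtual.release(raw_data(pages), 1 << 20)
}
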
+ 3 - 0
core/mem/virtual/virtual_platform.odin

@@ -7,6 +7,7 @@ Platform_Memory_Block :: struct {
 	reserved:   uint,
 } 
 
+@(no_sanitize_address)
 platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint) -> (block: ^Platform_Memory_Block, err: Allocator_Error) {
 	to_commit, to_reserve := to_commit, to_reserve
 	to_reserve = max(to_commit, to_reserve)
@@ -26,12 +27,14 @@ platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint) -> (bl
 }
 
 
+@(no_sanitize_address)
 platform_memory_free :: proc "contextless" (block: ^Platform_Memory_Block) {
 	if block != nil {
 		release(block, block.reserved)
 	}
 }
 
+@(no_sanitize_address)
 platform_memory_commit :: proc "contextless" (block: ^Platform_Memory_Block, to_commit: uint) -> (err: Allocator_Error) {
 	if to_commit < block.committed {
 		return nil

+ 11 - 1
core/mem/virtual/virtual_windows.odin

@@ -83,6 +83,8 @@ foreign Kernel32 {
 		dwNumberOfBytesToMap: uint,
 	) -> rawptr ---
 }
+
+@(no_sanitize_address)
 _reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) {
 	result := VirtualAlloc(nil, size, MEM_RESERVE, PAGE_READWRITE)
 	if result == nil {
@@ -93,6 +95,7 @@ _reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Err
 	return
 }
 
+@(no_sanitize_address)
 _commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error {
 	result := VirtualAlloc(data, size, MEM_COMMIT, PAGE_READWRITE)
 	if result == nil {
@@ -107,12 +110,18 @@ _commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error {
 	}
 	return nil
 }
+
+@(no_sanitize_address)
 _decommit :: proc "contextless" (data: rawptr, size: uint) {
 	VirtualFree(data, size, MEM_DECOMMIT)
 }
+
+@(no_sanitize_address)
 _release :: proc "contextless" (data: rawptr, size: uint) {
 	VirtualFree(data, 0, MEM_RELEASE)
 }
+
+@(no_sanitize_address)
 _protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) -> bool {
 	pflags: u32
 	pflags = PAGE_NOACCESS
@@ -136,7 +145,7 @@ _protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags)
 }
 
 
-
+@(no_sanitize_address)
 _platform_memory_init :: proc() {
 	sys_info: SYSTEM_INFO
 	GetSystemInfo(&sys_info)
@@ -147,6 +156,7 @@ _platform_memory_init :: proc() {
 }
 
 
+@(no_sanitize_address)
 _map_file :: proc "contextless" (fd: uintptr, size: i64, flags: Map_File_Flags) -> (data: []byte, error: Map_File_Error) {
 	page_flags: u32
 	if flags == {.Read} {

+ 2 - 0
src/llvm_backend.hpp

@@ -383,6 +383,8 @@ struct lbProcedure {
 	PtrMap<Ast *, lbValue> selector_values;
 	PtrMap<Ast *, lbAddr>  selector_addr;
 	PtrMap<LLVMValueRef, lbTupleFix> tuple_fix_map;
+
+	Array<lbValue> asan_stack_locals;
 };
 
 

+ 7 - 0
src/llvm_backend_general.cpp

@@ -3070,6 +3070,13 @@ gb_internal lbAddr lb_add_local(lbProcedure *p, Type *type, Entity *e, bool zero
 	if (e != nullptr) {
 		lb_add_entity(p->module, e, val);
 		lb_add_debug_local_variable(p, ptr, type, e->token);
+
+		// NOTE(lucas): In LLVM 20 and below there is no option to have asan clean up poisoned stack
+		// locals for us, so we need to manually track and unpoison these locals on proc return.
+		// LLVM 21 adds the 'use-after-scope' asan option which does this for us.
+		if (build_context.sanitizer_flags & SanitizerFlag_Address && !p->entity->Procedure.no_sanitize_address) {
+			array_add(&p->asan_stack_locals, val);
+		}
 	}
 
 	if (zero_init) {

+ 13 - 11
src/llvm_backend_proc.cpp

@@ -115,12 +115,13 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i
 	p->is_entry_point = false;
 
 	gbAllocator a = heap_allocator();
-	p->children.allocator      = a;
-	p->defer_stmts.allocator   = a;
-	p->blocks.allocator        = a;
-	p->branch_blocks.allocator = a;
-	p->context_stack.allocator = a;
-	p->scope_stack.allocator   = a;
+	p->children.allocator          = a;
+	p->defer_stmts.allocator       = a;
+	p->blocks.allocator            = a;
+	p->branch_blocks.allocator     = a;
+	p->context_stack.allocator     = a;
+	p->scope_stack.allocator       = a;
+	p->asan_stack_locals.allocator = a;
 	// map_init(&p->selector_values,  0);
 	// map_init(&p->selector_addr,    0);
 	// map_init(&p->tuple_fix_map,    0);
@@ -385,11 +386,12 @@ gb_internal lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name
 	p->is_entry_point = false;
 
 	gbAllocator a = permanent_allocator();
-	p->children.allocator      = a;
-	p->defer_stmts.allocator   = a;
-	p->blocks.allocator        = a;
-	p->branch_blocks.allocator = a;
-	p->context_stack.allocator = a;
+	p->children.allocator          = a;
+	p->defer_stmts.allocator       = a;
+	p->blocks.allocator            = a;
+	p->branch_blocks.allocator     = a;
+	p->context_stack.allocator     = a;
+	p->asan_stack_locals.allocator = a;
 	map_init(&p->tuple_fix_map, 0);
 
 

+ 12 - 0
src/llvm_backend_stmt.cpp

@@ -2917,6 +2917,18 @@ gb_internal void lb_emit_defer_stmts(lbProcedure *p, lbDeferExitKind kind, lbBlo
 	}
 	defer (p->branch_location_pos = prev_token_pos);
 
+	// TODO(lucas): In LLVM 21 use the 'use-after-scope' asan option which does this for us.
+	if (kind == lbDeferExit_Return) {
+		for_array(i, p->asan_stack_locals) {
+			lbValue local = p->asan_stack_locals[i];
+
+			auto args = array_make<lbValue>(temporary_allocator(), 2);
+			args[0] = lb_emit_conv(p, local, t_rawptr);
+			args[1] = lb_const_int(p->module, t_int, type_size_of(local.type->Pointer.elem));
+			lb_emit_runtime_call(p, "__asan_unpoison_memory_region", args);
+		}
+	}
+
 	isize count = p->defer_stmts.count;
 	isize i = count;
 	while (i --> 0) {