
[mem]: Document mutex, rollback stack and tracking allocators

flysand7 · 11 months ago
commit 016d1a84d4
4 changed files with 197 additions and 113 deletions
  1. core/mem/mutex_allocator.odin  + 15 - 0
  2. core/mem/raw.odin  + 30 - 16
  3. core/mem/rollback_stack_allocator.odin  + 74 - 61
  4. core/mem/tracking_allocator.odin  + 78 - 36

+ 15 - 0
core/mem/mutex_allocator.odin

@@ -3,16 +3,31 @@ package mem
 
 import "core:sync"
 
+/*
+The data for the mutex allocator.
+*/
 Mutex_Allocator :: struct {
 	backing: Allocator,
 	mutex:   sync.Mutex,
 }
 
+/*
+Initialize the mutex allocator.
+
+This procedure initializes the mutex allocator `m` using `backing_allocator` as
+the backing allocator through which all allocation requests will be passed.
+*/
 mutex_allocator_init :: proc(m: ^Mutex_Allocator, backing_allocator: Allocator) {
 	m.backing = backing_allocator
 	m.mutex = {}
 }
 
+/*
+Mutex allocator.
+
+The mutex allocator is a wrapper around another allocator that serializes all
+allocation requests across multiple threads.
+*/
 @(require_results)
 mutex_allocator :: proc(m: ^Mutex_Allocator) -> Allocator {
 	return Allocator{

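A minimal usage sketch of the mutex allocator documented above. The package name
and the slice allocation are illustrative; `mutex_allocator_init` and
`mutex_allocator` are the procedures added in this file:

	package mutex_allocator_example

	import "core:mem"

	main :: proc() {
		// Wrap the default allocator so that every allocation request is
		// serialized by a mutex; the resulting allocator may then be
		// shared between threads.
		ma: mem.Mutex_Allocator
		mem.mutex_allocator_init(&ma, context.allocator)
		context.allocator = mem.mutex_allocator(&ma)

		// Allocations made through `context.allocator` are now forwarded
		// to the backing allocator under the mutex.
		numbers := make([]int, 16)
		defer delete(numbers)
	}
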
+ 30 - 16
core/mem/raw.odin

@@ -4,68 +4,82 @@ import "base:builtin"
 import "base:runtime"
 
 /*
-Mamory layout of the `any` type.
+Memory layout of the `any` type.
 */
 Raw_Any :: runtime.Raw_Any
 
 /*
-Mamory layout of the `string` type.
+Memory layout of the `string` type.
 */
 Raw_String :: runtime.Raw_String
+
 /*
-Mamory layout of the `cstring` type.
+Memory layout of the `cstring` type.
 */
 Raw_Cstring :: runtime.Raw_Cstring
+
 /*
-Mamory layout of `[]T` types.
+Memory layout of `[]T` types.
 */
 Raw_Slice :: runtime.Raw_Slice
+
 /*
-Mamory layout of `[dynamic]T` types.
+Memory layout of `[dynamic]T` types.
 */
 Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array
+
 /*
-Mamory layout of `map[K]V` types.
+Memory layout of `map[K]V` types.
 */
 Raw_Map :: runtime.Raw_Map
+
 /*
-Mamory layout of `#soa []T` types.
+Memory layout of `#soa` pointer types.
 */
 Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer
+
 /*
-Mamory layout of the `complex32` type.
+Memory layout of the `complex32` type.
 */
 Raw_Complex32 :: runtime.Raw_Complex32
+
 /*
-Mamory layout of the `complex64` type.
+Memory layout of the `complex64` type.
 */
 Raw_Complex64 :: runtime.Raw_Complex64
+
 /*
-Mamory layout of the `complex128` type.
+Memory layout of the `complex128` type.
 */
 Raw_Complex128 :: runtime.Raw_Complex128
+
 /*
-Mamory layout of the `quaternion64` type.
+Memory layout of the `quaternion64` type.
 */
 Raw_Quaternion64 :: runtime.Raw_Quaternion64
+
 /*
-Mamory layout of the `quaternion128` type.
+Memory layout of the `quaternion128` type.
 */
 Raw_Quaternion128 :: runtime.Raw_Quaternion128
+
 /*
-Mamory layout of the `quaternion256` type.
+Memory layout of the `quaternion256` type.
 */
 Raw_Quaternion256 :: runtime.Raw_Quaternion256
+
 /*
-Mamory layout of the `quaternion64` type.
+Memory layout of the `quaternion64` type in its vector-scalar representation.
 */
 Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar
+
 /*
-Mamory layout of the `quaternion128` type.
+Memory layout of the `quaternion128` type in its vector-scalar representation.
 */
 Raw_Quaternion128_Vector_Scalar :: runtime.Raw_Quaternion128_Vector_Scalar
+
 /*
-Mamory layout of the `quaternion256` type.
+Memory layout of the `quaternion256` type in its vector-scalar representation.
 */
 Raw_Quaternion256_Vector_Scalar :: runtime.Raw_Quaternion256_Vector_Scalar
 

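The raw types above mirror the in-memory layout of their builtin counterparts,
so a value can be reinterpreted with `transmute` to inspect its fields. A small
sketch, assuming only the `Raw_String` alias documented in this file:

	package raw_string_example

	import "core:fmt"
	import "core:mem"

	main :: proc() {
		s := "hello"
		// A string is a data pointer plus a length; `Raw_String` exposes
		// those two fields directly.
		raw := transmute(mem.Raw_String)s
		fmt.println(raw.len)          // 5
		fmt.println(raw.data != nil)  // true
	}
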
+ 74 - 61
core/mem/rollback_stack_allocator.odin

@@ -1,39 +1,15 @@
 package mem
 
-/*
-The Rollback Stack Allocator was designed for the test runner to be fast,
-able to grow, and respect the Tracking Allocator's requirement for
-individual frees. It is not overly concerned with fragmentation, however.
-
-It has support for expansion when configured with a block allocator and
-limited support for out-of-order frees.
-
-Allocation has constant-time best and usual case performance.
-At worst, it is linear according to the number of memory blocks.
-
-Allocation follows a first-fit strategy when there are multiple memory
-blocks.
-
-Freeing has constant-time best and usual case performance.
-At worst, it is linear according to the number of memory blocks and number
-of freed items preceding the last item in a block.
-
-Resizing has constant-time performance, if it's the last item in a block, or
-the new size is smaller. Naturally, this becomes linear-time if there are
-multiple blocks to search for the pointer's owning block. Otherwise, the
-allocator defaults to a combined alloc & free operation internally.
-
-Out-of-order freeing is accomplished by collapsing a run of freed items
-from the last allocation backwards.
-
-Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
-the requested alignment.
-*/
 import "base:runtime"
 
+/*
+Rollback stack default block size.
+*/
 ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte
 
 /*
+Rollback stack max head block size.
+
 This limitation is due to the size of `prev_ptr`, but it is only for the
 head block; any allocation in excess of the allocator's `block_size` is
 valid, so long as the block allocator can handle it.
@@ -43,12 +19,18 @@ within is freed; they are immediately returned to the block allocator.
 */
 ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte
 
+/*
+Allocation header of the rollback stack allocator.
+*/
 Rollback_Stack_Header :: bit_field u64 {
 	prev_offset:  uintptr | 32,
 	is_free:         bool |  1,
 	prev_ptr:     uintptr | 31,
 }
 
+/*
+Block header of the rollback stack allocator.
+*/
 Rollback_Stack_Block :: struct {
 	next_block: ^Rollback_Stack_Block,
 	last_alloc: rawptr,
@@ -56,6 +38,9 @@ Rollback_Stack_Block :: struct {
 	buffer: []byte,
 }
 
+/*
+Rollback stack allocator data.
+*/
 Rollback_Stack :: struct {
 	head: ^Rollback_Stack_Block,
 	block_size: int,
@@ -111,6 +96,9 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_
 	}
 }
 
+/*
+Free an allocation made on a rollback stack allocator.
+*/
 @(private="file", require_results)
 rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
 	parent, block, header := rb_find_ptr(stack, ptr) or_return
@@ -129,6 +117,9 @@ rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
 	return nil
 }
 
+/*
+Free all memory owned by the rollback stack allocator.
+*/
 @(private="file")
 rb_free_all :: proc(stack: ^Rollback_Stack) {
 	for block := stack.head.next_block; block != nil; /**/ {
@@ -142,14 +133,16 @@ rb_free_all :: proc(stack: ^Rollback_Stack) {
 	stack.head.offset = 0
 }
 
+/*
+Resize an allocation made on a rollback stack allocator.
+*/
 @(private="file", require_results)
-rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+rb_resize_non_zeroed :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
 	if ptr != nil {
 		if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
 			// `block.offset` should never underflow because it is contingent
 			// on `old_size` in the first place, assuming sane arguments.
 			assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
-
 			if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
 				// Prevent singleton allocations from fragmenting by forbidding
 				// them to shrink, removing the possibility of overflow bugs.
@@ -160,27 +153,26 @@ rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment
 			}
 		}
 	}
-
-	result = rb_alloc(stack, size, alignment) or_return
+	result = rb_alloc_non_zeroed(stack, size, alignment) or_return
 	runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size)
 	err = rb_free(stack, ptr)
-
 	return
 }
 
+/*
+Allocate memory using the rollback stack allocator.
+*/
 @(private="file", require_results)
-rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+rb_alloc_non_zeroed :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
 	parent: ^Rollback_Stack_Block
 	for block := stack.head; /**/; block = block.next_block {
 		when !ODIN_DISABLE_ASSERT {
 			allocated_new_block: bool
 		}
-
 		if block == nil {
 			if stack.block_allocator.procedure == nil {
 				return nil, .Out_Of_Memory
 			}
-
 			minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1
 			new_block_size := max(minimum_size_required, stack.block_size)
 			block = rb_make_block(new_block_size, stack.block_allocator) or_return
@@ -189,10 +181,8 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt
 				allocated_new_block = true
 			}
 		}
-
 		start := raw_data(block.buffer)[block.offset:]
 		padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header))
-
 		if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) {
 			when !ODIN_DISABLE_ASSERT {
 				if allocated_new_block {
@@ -202,54 +192,50 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt
 			parent = block
 			continue
 		}
-
 		header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):])
 		ptr := start[padding:]
-
 		header^ = {
 			prev_offset = block.offset,
 			prev_ptr = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer),
 			is_free = false,
 		}
-
 		block.last_alloc = ptr
 		block.offset += padding + cast(uintptr)size
-
 		if len(block.buffer) > stack.block_size {
 			// This block exceeds the allocator's standard block size and is considered a singleton.
 			// Prevent any further allocations on it.
 			block.offset = cast(uintptr)len(block.buffer)
 		}
-		
 		#no_bounds_check return ptr[:size], nil
 	}
-
 	return nil, .Out_Of_Memory
 }
 
 @(private="file", require_results)
 rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
 	buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
-
 	block = cast(^Rollback_Stack_Block)raw_data(buffer)
 	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
 	return
 }
 
-
+/*
+Initialize the rollback stack allocator using a fixed backing buffer.
+*/
 rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
 	MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
 	assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
-
 	block := cast(^Rollback_Stack_Block)raw_data(buffer)
 	block^ = {}
 	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
-
 	stack^ = {}
 	stack.head = block
 	stack.block_size = len(block.buffer)
 }
 
+/*
+Initialize the rollback stack allocator using a backing block allocator.
+*/
 rollback_stack_init_dynamic :: proc(
 	stack: ^Rollback_Stack,
 	block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
@@ -262,22 +248,25 @@ rollback_stack_init_dynamic :: proc(
 		// size is insufficient; check only on platforms with big enough ints.
 		assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
 	}
-
 	block := rb_make_block(block_size, block_allocator) or_return
-
 	stack^ = {}
 	stack.head = block
 	stack.block_size = block_size
 	stack.block_allocator = block_allocator
-
 	return nil
 }
 
+/*
+Initialize the rollback stack.
+*/
 rollback_stack_init :: proc {
 	rollback_stack_init_buffered,
 	rollback_stack_init_dynamic,
 }
 
+/*
+Destroy a rollback stack.
+*/
 rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
 	if stack.block_allocator.procedure != nil {
 		rb_free_all(stack)
@@ -286,6 +275,37 @@ rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
 	stack^ = {}
 }
 
+/*
+Rollback stack allocator.
+
+The Rollback Stack Allocator was designed for the test runner to be fast,
+able to grow, and respect the Tracking Allocator's requirement for
+individual frees. It is not overly concerned with fragmentation, however.
+
+It has support for expansion when configured with a block allocator and
+limited support for out-of-order frees.
+
+Allocation has constant-time best and usual case performance.
+At worst, it is linear according to the number of memory blocks.
+
+Allocation follows a first-fit strategy when there are multiple memory
+blocks.
+
+Freeing has constant-time best and usual case performance.
+At worst, it is linear according to the number of memory blocks and number
+of freed items preceding the last item in a block.
+
+Resizing has constant-time performance if the allocation is the last item in a
+block or the new size is smaller. Naturally, this becomes linear-time if there
+are multiple blocks to search for the pointer's owning block. Otherwise, the
+allocator defaults to a combined alloc & free operation internally.
+
+Out-of-order freeing is accomplished by collapsing a run of freed items
+from the last allocation backwards.
+
+Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
+the requested alignment.
+*/
 @(require_results)
 rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
 	return Allocator {
@@ -309,38 +329,31 @@ rollback_stack_allocator_proc :: proc(
 	case .Alloc, .Alloc_Non_Zeroed:
 		assert(size >= 0, "Size must be positive or zero.", location)
 		assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
-		result = rb_alloc(stack, size, alignment) or_return
-
+		result = rb_alloc_non_zeroed(stack, size, alignment) or_return
 		if mode == .Alloc {
 			zero_slice(result)
 		}
-
 	case .Free:
 		err = rb_free(stack, old_memory)
 
 	case .Free_All:
 		rb_free_all(stack)
-
 	case .Resize, .Resize_Non_Zeroed:
 		assert(size >= 0, "Size must be positive or zero.", location)
 		assert(old_size >= 0, "Old size must be positive or zero.", location)
 		assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
-		result = rb_resize(stack, old_memory, old_size, size, alignment) or_return
-
+		result = rb_resize_non_zeroed(stack, old_memory, old_size, size, alignment) or_return
 		#no_bounds_check if mode == .Resize && size > old_size {
 			zero_slice(result[old_size:])
 		}
-
 	case .Query_Features:
 		set := (^Allocator_Mode_Set)(old_memory)
 		if set != nil {
 			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed}
 		}
 		return nil, nil
-
 	case .Query_Info:
 		return nil, .Mode_Not_Implemented
 	}
-
 	return
 }

+ 78 - 36
core/mem/tracking_allocator.odin

@@ -4,50 +4,38 @@ package mem
 import "base:runtime"
 import "core:sync"
 
+/*
+Allocation entry for the tracking allocator.
+
+This structure stores the data related to an allocation.
+*/
 Tracking_Allocator_Entry :: struct {
-	memory:    rawptr,
-	size:      int,
+	// Pointer to an allocated region.
+	memory: rawptr,
+	// Size of the allocated memory region.
+	size: int,
+	// Requested alignment.
 	alignment: int,
-	mode:      Allocator_Mode,
-	err:       Allocator_Error,
+	// Mode of the operation.
+	mode: Allocator_Mode,
+	// Error returned by the operation.
+	err: Allocator_Error,
+	// Location of the allocation.
 	location:  runtime.Source_Code_Location,
 }
 
+/*
+Bad free entry for a tracking allocator.
+*/
 Tracking_Allocator_Bad_Free_Entry :: struct {
-	memory:   rawptr,
+	// Pointer on which the free operation was called.
+	memory: rawptr,
+	// The source location where the operation was called.
 	location: runtime.Source_Code_Location,
 }
 
 /*
-An example of how to use the `Tracking_Allocator` to track subsequent allocations
-in your program and report leaks and bad frees:
-
-Example:
-
-	package foo
-
-	import "core:mem"
-	import "core:fmt"
-
-	_main :: proc() {
-		// do stuff
-	}
-
-	main :: proc() {
-		track: mem.Tracking_Allocator
-		mem.tracking_allocator_init(&track, context.allocator)
-		defer mem.tracking_allocator_destroy(&track)
-		context.allocator = mem.tracking_allocator(&track)
-
-		_main()
-
-		for _, leak in track.allocation_map {
-			fmt.printf("%v leaked %m\n", leak.location, leak.size)
-		}
-		for bad_free in track.bad_free_array {
-			fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
-		}
-	}
+Tracking allocator data.
 */
 Tracking_Allocator :: struct {
 	backing: Allocator,
@@ -63,6 +51,13 @@ Tracking_Allocator :: struct {
 	current_memory_allocated: i64,
 }
 
+/*
+Initialize the tracking allocator.
+
+This procedure initializes the tracking allocator `t` with a backing allocator
+specified by `backing_allocator`. The `internals_allocator` will be used to
+allocate the internal tracking data.
+*/
 tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
 	t.backing = backing_allocator
 	t.allocation_map.allocator = internals_allocator
@@ -72,12 +67,22 @@ tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Alloc
 	}
 }
 
+/*
+Destroy the tracking allocator.
+*/
 tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
 	delete(t.allocation_map)
 	delete(t.bad_free_array)
 }
 
-// Clear only the current allocation data while keeping the totals intact.
+/*
+Clear the tracking allocator.
+
+This procedure clears the tracked data from a tracking allocator.
+
+**Note**: This procedure clears only the current allocation data while keeping
+the totals intact.
+*/
 tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
 	sync.mutex_lock(&t.mutex)
 	clear(&t.allocation_map)
@@ -86,7 +91,11 @@ tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
 	sync.mutex_unlock(&t.mutex)
 }
 
-// Reset all of a Tracking Allocator's allocation data back to zero.
+/*
+Reset the tracking allocator.
+
+Reset all of a Tracking Allocator's allocation data back to zero.
+*/
 tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
 	sync.mutex_lock(&t.mutex)
 	clear(&t.allocation_map)
@@ -100,6 +109,39 @@ tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
 	sync.mutex_unlock(&t.mutex)
 }
 
+/*
+Tracking allocator.
+
+The tracking allocator is an allocator wrapper that tracks memory allocations.
+This allocator stores all active allocations in a map. Whenever a pointer that
+is not present in the map is freed, an entry is added to the `bad_free_array`.
+
+An example of how to use the `Tracking_Allocator` to track subsequent allocations
+in your program and report leaks and bad frees:
+
+Example:
+
+	package foo
+
+	import "core:mem"
+	import "core:fmt"
+
+	do_stuff :: proc() {
+		// The program's own work; anything allocated here through
+		// `context.allocator` and not freed shows up as a leak.
+	}
+
+	main :: proc() {
+		track: mem.Tracking_Allocator
+		mem.tracking_allocator_init(&track, context.allocator)
+		defer mem.tracking_allocator_destroy(&track)
+		context.allocator = mem.tracking_allocator(&track)
+
+		do_stuff()
+
+		for _, leak in track.allocation_map {
+			fmt.printf("%v leaked %m\n", leak.location, leak.size)
+		}
+		for bad_free in track.bad_free_array {
+			fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
+		}
+	}
+*/
 @(require_results)
 tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
 	return Allocator{
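
A sketch of per-iteration leak reporting built on `tracking_allocator_clear`,
which drops the current entries while keeping the running totals (such as
`peak_memory_allocated`) intact. `run_one_frame` is a hypothetical placeholder
for the program's own work:

	package tracking_clear_example

	import "core:fmt"
	import "core:mem"

	// Hypothetical per-frame work; anything allocated here and not freed
	// before the frame ends is reported as a leak below.
	run_one_frame :: proc() {
		_ = make([]int, 8)
	}

	main :: proc() {
		track: mem.Tracking_Allocator
		mem.tracking_allocator_init(&track, context.allocator)
		defer mem.tracking_allocator_destroy(&track)
		context.allocator = mem.tracking_allocator(&track)

		for frame in 0..<3 {
			run_one_frame()
			for _, entry in track.allocation_map {
				fmt.printf("frame %v: %v leaked %v bytes\n", frame, entry.location, entry.size)
			}
			// Drop the per-frame entries but keep the totals intact.
			mem.tracking_allocator_clear(&track)
		}
	}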