
Refactor `core:mem/tlsf`, add `free_all` support.

TODO: Allow the TLSF allocator to add additional pools from its backing
      allocator when it would otherwise run out of memory.
Jeroen van Rijn, 5 months ago
commit 7088284ff4
3 changed files with 387 additions and 263 deletions
  1. core/mem/tlsf/tlsf.odin (+29 -11)
  2. core/mem/tlsf/tlsf_internal.odin (+256 -252)
  3. tests/core/mem/test_core_mem.odin (+102 -0)
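
Before the diff, a minimal usage sketch of the refactored API (not part of the
commit itself): `init_from_allocator` now takes only a backing allocator and an
initial pool size, and `.Free_All` actually resets the allocator. The pool and
allocation sizes below are arbitrary example values.

package tlsf_usage_example

import "core:mem/tlsf"

main :: proc() {
	alloc: tlsf.Allocator
	// Carve an initial 1 MiB pool out of the current allocator.
	if err := tlsf.init_from_allocator(&alloc, context.allocator, 1 << 20); err != .None {
		return
	}
	defer tlsf.destroy(&alloc)

	context.allocator = tlsf.allocator(&alloc)

	data := make([]byte, 4096) // Served from the TLSF pool; `.Alloc` zeroes memory.
	_ = data

	// Newly supported by this commit: reset all pools without
	// releasing the backing memory.
	free_all(context.allocator)
}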

core/mem/tlsf/tlsf.odin (+29 -11)

@@ -10,6 +10,7 @@
 // package mem_tlsf implements a Two Level Segregated Fit memory allocator.
 package mem_tlsf
 
+import "base:intrinsics"
 import "base:runtime"
 
 Error :: enum byte {
@@ -42,7 +43,7 @@ Allocator :: struct {
 
 	// If we're expected to grow when we run out of memory,
 	// how much should we ask the backing allocator for?
-	new_pool_size: int,
+	new_pool_size: uint,
 
 }
 #assert(size_of(Allocator) % ALIGN_SIZE == 0)
@@ -74,12 +75,11 @@ init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
 		allocator = {},
 	}
 
-	clear(control)
-	return pool_add(control, buf[:])
+	return free_all(control)
 }
 
 @(require_results)
-init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int, new_pool_size := 0) -> Error {
+init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int) -> Error {
 	assert(control != nil)
 	pool_bytes := align_up(uint(initial_pool_size), ALIGN_SIZE) + INITIAL_POOL_OVERHEAD
 	if pool_bytes < BLOCK_SIZE_MIN {
@@ -98,8 +98,9 @@ init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, ini
 		allocator = backing,
 	}
 
-	clear(control)
-	return pool_add(control, buf[:])
+	// TODO(Jeroen): Add automatically growing the pools from the backing allocator
+
+	return free_all(control)
 }
 init :: proc{init_from_buffer, init_from_allocator}
 
@@ -112,8 +113,6 @@ destroy :: proc(control: ^Allocator) {
 
 	// No need to call `pool_remove` or anything, as they're embedded in the backing memory.
 	// We do however need to free the `Pool` tracking entities and the backing memory itself.
-	// As `Allocator` is embedded in the first backing slice, the `control` pointer will be
-	// invalid after this call.
 	for p := control.pool.next; p != nil; {
 		next := p.next
 
@@ -145,9 +144,8 @@ allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
 		return nil, nil
 
 	case .Free_All:
-		// NOTE: this doesn't work right at the moment, Jeroen has it on his to-do list :)
-		// clear(control)
-		return nil, .Mode_Not_Implemented
+		free_all(control)
+		return nil, nil
 
 	case .Resize:
 		return resize(control, old_memory, uint(old_size), uint(size), uint(alignment))
@@ -168,3 +166,23 @@
 
 	return nil, nil
 }
+
+// Exported solely to facilitate testing
+@(require_results)
+ffs :: proc "contextless" (word: u32) -> (bit: i32) {
+	return -1 if word == 0 else i32(intrinsics.count_trailing_zeros(word))
+}
+
+// Exported solely to facilitate testing
+@(require_results)
+fls :: proc "contextless" (word: u32) -> (bit: i32) {
+	N :: (size_of(u32) * 8) - 1
+	return i32(N - intrinsics.count_leading_zeros(word))
+}
+
+// Exported solely to facilitate testing
+@(require_results)
+fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
+	N :: (size_of(uint) * 8) - 1
+	return i32(N - intrinsics.count_leading_zeros(size))
+}
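
The `new_pool_size` field and the TODO above sketch a grow-on-OOM path that
this commit does not implement yet. A speculative sketch of that fallback
follows; the `pool_grow` name, the error handling, and the assumption that
the backing allocator returns `ALIGN_SIZE`-aligned memory are all
hypothetical, not part of the commit:

// Hypothetical sketch only: grow by one pool when `block_locate_free`
// comes up empty, then retry the allocation.
@(private, require_results)
pool_grow :: proc(control: ^Allocator) -> (ok: bool) {
	if control.new_pool_size == 0 {
		return false // Growing was not requested at init time.
	}
	backing := control.pool.allocator
	bytes   := align_up(control.new_pool_size, ALIGN_SIZE) + INITIAL_POOL_OVERHEAD
	buf, merr := make([]byte, int(bytes), backing)
	if merr != nil {
		return false
	}
	// A `Pool` tracking node for `buf` would also need to be chained onto
	// `control.pool` here so `destroy` and `free_all` can find it.
	return pool_add(control, buf) == .None
}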

core/mem/tlsf/tlsf_internal.odin (+256 -252)

@@ -7,7 +7,6 @@
 		Jeroen van Rijn: Source port
 */
 
-
 package mem_tlsf
 
 import "base:intrinsics"
@@ -106,6 +105,220 @@ bits for `FL_INDEX`.
 BLOCK_SIZE_MIN :: uint(size_of(Block_Header) - size_of(^Block_Header))
 BLOCK_SIZE_MAX :: uint(1) << FL_INDEX_MAX
 
+// Clear control structure and point all empty lists at the null block
+@(private)
+free_all :: proc(control: ^Allocator) -> (err: Error) {
+	// Clear internal structures
+	control.block_null.next_free = &control.block_null
+	control.block_null.prev_free = &control.block_null
+	control.fl_bitmap = 0
+	for i in 0..<FL_INDEX_COUNT {
+		control.sl_bitmap[i] = 0
+		for j in 0..<SL_INDEX_COUNT {
+			control.blocks[i][j] = &control.block_null
+		}
+	}
+
+	// Add backing pool(s)
+	for p := &control.pool; p != nil; p = p.next {
+		pool_add(control, p.data) or_return
+	}
+	return
+}
+
+@(private, require_results)
+pool_add :: proc(control: ^Allocator, pool: []u8) -> (err: Error) {
+	assert(uintptr(raw_data(pool)) % ALIGN_SIZE == 0, "Added memory must be aligned")
+
+	pool_overhead := POOL_OVERHEAD
+	pool_bytes := align_down(len(pool) - pool_overhead, ALIGN_SIZE)
+
+	if pool_bytes < BLOCK_SIZE_MIN {
+		return .Backing_Buffer_Too_Small
+	} else if pool_bytes > BLOCK_SIZE_MAX {
+		return .Backing_Buffer_Too_Large
+	}
+
+	// Create the main free block. Offset the start of the block slightly,
+	// so that the `prev_phys_block` field falls outside of the pool -
+	// it will never be used.
+	block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
+
+	block_set_size(block, pool_bytes)
+	block_set_free(block)
+	block_set_prev_used(block)
+	block_insert(control, block)
+
+	// Split the block to create a zero-size sentinel block
+	next := block_link_next(block)
+	block_set_size(next, 0)
+	block_set_used(next)
+	block_set_prev_free(next)
+
+	return
+}
+
+@(private)
+pool_remove :: proc(control: ^Allocator, pool: []u8) {
+	block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
+
+	assert(block_is_free(block),               "Block should be free")
+	assert(!block_is_free(block_next(block)),  "Next block should not be free")
+	assert(block_size(block_next(block)) == 0, "Next block size should be zero")
+
+	fl, sl := mapping_insert(block_size(block))
+	remove_free_block(control, block, fl, sl)
+}
+
+@(private, require_results)
+alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+	assert(control != nil)
+	adjust := adjust_request_size(size, ALIGN_SIZE)
+
+	GAP_MINIMUM :: size_of(Block_Header)
+	size_with_gap := adjust_request_size(adjust + align + GAP_MINIMUM, align)
+
+	aligned_size := size_with_gap if adjust != 0 && align > ALIGN_SIZE else adjust
+	if aligned_size == 0 && size > 0 {
+		return nil, .Out_Of_Memory
+	}
+
+	block := block_locate_free(control, aligned_size)
+	if block == nil {
+		return nil, .Out_Of_Memory
+	}
+	ptr := block_to_ptr(block)
+	aligned := align_ptr(ptr, align)
+	gap := uint(int(uintptr(aligned)) - int(uintptr(ptr)))
+
+	if gap != 0 && gap < GAP_MINIMUM {
+		gap_remain := GAP_MINIMUM - gap
+		offset := uintptr(max(gap_remain, align))
+		next_aligned := rawptr(uintptr(aligned) + offset)
+
+		aligned = align_ptr(next_aligned, align)
+
+		gap = uint(int(uintptr(aligned)) - int(uintptr(ptr)))
+	}
+
+	if gap != 0 {
+		assert(gap >= GAP_MINIMUM, "gap size too small")
+		block = block_trim_free_leading(control, block, gap)
+	}
+
+	return block_prepare_used(control, block, adjust)
+}
+
+@(private, require_results)
+alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+	res, err = alloc_bytes_non_zeroed(control, size, align)
+	if err == nil {
+		intrinsics.mem_zero(raw_data(res), len(res))
+	}
+	return
+}
+
+
+free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
+	assert(control != nil)
+	// `size` is currently ignored
+	if ptr == nil {
+		return
+	}
+
+	block := block_from_ptr(ptr)
+	assert(!block_is_free(block), "block already marked as free") // double free
+	block_mark_as_free(block)
+	block = block_merge_prev(control, block)
+	block = block_merge_next(control, block)
+	block_insert(control, block)
+}
+
+
+@(private, require_results)
+resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+	assert(control != nil)
+	if ptr != nil && new_size == 0 {
+		free_with_size(control, ptr, old_size)
+		return
+	} else if ptr == nil {
+		return alloc_bytes(control, new_size, alignment)
+	}
+
+	block := block_from_ptr(ptr)
+	next := block_next(block)
+
+	curr_size := block_size(block)
+	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
+	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
+
+	assert(!block_is_free(block), "block already marked as free") // double free
+
+	min_size := min(curr_size, new_size, old_size)
+
+	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
+		res = alloc_bytes(control, new_size, alignment) or_return
+		if res != nil {
+			copy(res, ([^]byte)(ptr)[:min_size])
+			free_with_size(control, ptr, curr_size)
+		}
+		return
+	}
+	if adjust > curr_size {
+		_ = block_merge_next(control, block)
+		block_mark_as_used(block)
+	}
+
+	block_trim_used(control, block, adjust)
+	res = ([^]byte)(ptr)[:new_size]
+
+	if min_size < new_size {
+		to_zero := ([^]byte)(ptr)[min_size:new_size]
+		runtime.mem_zero(raw_data(to_zero), len(to_zero))
+	}
+	return
+}
+
+@(private, require_results)
+resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+	assert(control != nil)
+	if ptr != nil && new_size == 0 {
+		free_with_size(control, ptr, old_size)
+		return
+	} else if ptr == nil {
+		return alloc_bytes_non_zeroed(control, new_size, alignment)
+	}
+
+	block := block_from_ptr(ptr)
+	next := block_next(block)
+
+	curr_size := block_size(block)
+	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
+	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
+
+	assert(!block_is_free(block), "block already marked as free") // double free
+
+	min_size := min(curr_size, new_size, old_size)
+
+	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
+		res = alloc_bytes_non_zeroed(control, new_size, alignment) or_return
+		if res != nil {
+			copy(res, ([^]byte)(ptr)[:min_size])
+			free_with_size(control, ptr, old_size)
+		}
+		return
+	}
+
+	if adjust > curr_size {
+		_ = block_merge_next(control, block)
+		block_mark_as_used(block)
+	}
+
+	block_trim_used(control, block, adjust)
+	res = ([^]byte)(ptr)[:new_size]
+	return
+}
+
 /*
 	TLSF achieves O(1) cost for `alloc` and `free` operations by limiting
 	the search for a free block to a free list of guaranteed size
@@ -116,106 +329,95 @@ BLOCK_SIZE_MAX :: uint(1) << FL_INDEX_MAX
 	NOTE: TLSF spec relies on ffs/fls returning a value in the range 0..31.
 */
 
-@(require_results)
-ffs :: proc "contextless" (word: u32) -> (bit: i32) {
-	return -1 if word == 0 else i32(intrinsics.count_trailing_zeros(word))
-}
-
-@(require_results)
-fls :: proc "contextless" (word: u32) -> (bit: i32) {
-	N :: (size_of(u32) * 8) - 1
-	return i32(N - intrinsics.count_leading_zeros(word))
-}
-
-@(require_results)
-fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
-	N :: (size_of(uint) * 8) - 1
-	return i32(N - intrinsics.count_leading_zeros(size))
-}
-
-@(require_results)
+@(private, require_results)
 block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) {
 	return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)
 }
 
+@(private)
 block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) {
 	old_size := block.size
 	block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE))
 }
 
-@(require_results)
+@(private, require_results)
 block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) {
 	return block_size(block) == 0
 }
 
-@(require_results)
+@(private, require_results)
 block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) {
 	return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE
 }
 
+@(private)
 block_set_free :: proc "contextless" (block: ^Block_Header) {
 	block.size |= BLOCK_HEADER_FREE
 }
 
+@(private)
 block_set_used :: proc "contextless" (block: ^Block_Header) {
 	block.size &~= BLOCK_HEADER_FREE
 }
 
-@(require_results)
+@(private, require_results)
 block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) {
 	return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE
 }
 
+@(private)
 block_set_prev_free :: proc "contextless" (block: ^Block_Header) {
 	block.size |= BLOCK_HEADER_PREV_FREE
 }
 
+@(private)
 block_set_prev_used :: proc "contextless" (block: ^Block_Header) {
 	block.size &~= BLOCK_HEADER_PREV_FREE
 }
 
-@(require_results)
+@(private, require_results)
 block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) {
 	return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET)
 }
 
-@(require_results)
+@(private, require_results)
 block_to_ptr   :: proc(block: ^Block_Header) -> (ptr: rawptr) {
 	return rawptr(uintptr(block) + BLOCK_START_OFFSET)
 }
 
 // Return location of next block after block of given size.
-@(require_results)
+@(private, require_results)
 offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
 	return (^Block_Header)(uintptr(ptr) + uintptr(size))
 }
 
-@(require_results)
+@(private, require_results)
 offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
 	return (^Block_Header)(uintptr(ptr) - uintptr(size))
 }
 
 // Return location of previous block.
-@(require_results)
+@(private, require_results)
 block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) {
 	assert(block_is_prev_free(block), "previous block must be free")
 	return block.prev_phys_block
 }
 
 // Return location of next existing block.
-@(require_results)
+@(private, require_results)
 block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
 	return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD)
 }
 
 // Link a new block with its physical neighbor, return the neighbor.
-@(require_results)
+@(private, require_results)
 block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
 	next = block_next(block)
 	next.prev_phys_block = block
 	return
 }
 
+@(private)
 block_mark_as_free :: proc(block: ^Block_Header) {
 	// Link the block to the next block, first.
 	next := block_link_next(block)
@@ -223,25 +425,26 @@ block_mark_as_free :: proc(block: ^Block_Header) {
 	block_set_free(block)
 }
 
+@(private)
 block_mark_as_used :: proc(block: ^Block_Header) {
 	next := block_next(block)
 	block_set_prev_used(next)
 	block_set_used(block)
 }
 
-@(require_results)
+@(private, require_results)
 align_up :: proc(x, align: uint) -> (aligned: uint) {
 	assert(0 == (align & (align - 1)), "must align to a power of two")
 	return (x + (align - 1)) &~ (align - 1)
 }
 
-@(require_results)
+@(private, require_results)
 align_down :: proc(x, align: uint) -> (aligned: uint) {
 	assert(0 == (align & (align - 1)), "must align to a power of two")
 	return x - (x & (align - 1))
 }
 
-@(require_results)
+@(private, require_results)
 align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
 	assert(0 == (align & (align - 1)), "must align to a power of two")
 	align_mask := uintptr(align) - 1
@@ -251,7 +454,7 @@ align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
 }
 
 // Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
-@(require_results)
+@(private, require_results)
 adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
 	if size == 0 {
 		return 0
@@ -265,7 +468,7 @@ adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
 }
 
 // Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
-@(require_results)
+@(private, require_results)
 adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) {
 	if size == 0 {
 		return 0, nil
@@ -283,7 +486,7 @@
 // TLSF utility functions. In most cases these are direct translations of
 // the documentation in the research paper.
 
-@(optimization_mode="favor_size", require_results)
+@(optimization_mode="favor_size", private, require_results)
 mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
 	if size < SMALL_BLOCK_SIZE {
 		// Store small blocks in first list.
@@ -296,7 +499,7 @@ mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
 	return
 }
 
-@(optimization_mode="favor_size", require_results)
+@(optimization_mode="favor_size", private, require_results)
 mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
 	rounded = size
 	if size >= SMALL_BLOCK_SIZE {
@@ -307,12 +510,12 @@ mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
 }
 
 // This version rounds up to the next block size (for allocations)
-@(optimization_mode="favor_size", require_results)
+@(optimization_mode="favor_size", private, require_results)
 mapping_search :: proc(size: uint) -> (fl, sl: i32) {
 	return mapping_insert(mapping_round(size))
 }
 
-@(require_results)
+@(private, require_results)
 search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) {
 	// First, search for a block in the list associated with the given fl/sl index.
 	fl := fli^; sl := sli^
@@ -339,6 +542,7 @@ search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^B
 }
 
 // Remove a free block from the free list.
+@(private)
 remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
 	prev := block.prev_free
 	next := block.next_free
@@ -364,6 +568,7 @@ remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl
 }
 
 // Insert a free block into the free block list.
+@(private)
 insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
 	current := control.blocks[fl][sl]
 	assert(current != nil, "free lists cannot have a nil entry")
@@ -381,24 +586,26 @@ insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl
 }
 
 // Remove a given block from the free list.
+@(private)
 block_remove :: proc(control: ^Allocator, block: ^Block_Header) {
 	fl, sl := mapping_insert(block_size(block))
 	remove_free_block(control, block, fl, sl)
 }
 
 // Insert a given block into the free list.
+@(private)
 block_insert :: proc(control: ^Allocator, block: ^Block_Header) {
 	fl, sl := mapping_insert(block_size(block))
 	insert_free_block(control, block, fl, sl)
 }
 
-@(require_results)
+@(private, require_results)
 block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) {
 	return block_size(block) >= size_of(Block_Header) + size
 }
 
 // Split a block into two, the second of which is free.
-@(require_results)
+@(private, require_results)
 block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
 	// Calculate the amount of space left in the remaining block.
 	remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD)
@@ -419,7 +626,7 @@ block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Head
 }
 
 // Absorb a free block's storage into an adjacent previous free block.
-@(require_results)
+@(private, require_results)
 block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) {
 	assert(!block_is_last(prev), "previous block can't be last")
 	// Note: Leaves flags untouched.
@@ -429,7 +636,7 @@ block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^B
 }
 
 // Merge a just-freed block with an adjacent previous free block.
-@(require_results)
+@(private, require_results)
 block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
 	merged = block
 	if (block_is_prev_free(block)) {
@@ -443,7 +650,7 @@ block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged:
 }
 
 // Merge a just-freed block with an adjacent free block.
-@(require_results)
+@(private, require_results)
 block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
 	merged = block
 	next  := block_next(block)
@@ -458,6 +665,7 @@ block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged:
 }
 
 // Trim any trailing block space off the end of a free block, return to pool.
+@(private)
 block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
 	assert(block_is_free(block), "block must be free")
 	if (block_can_split(block, size)) {
@@ -469,6 +677,7 @@ block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
 }
 
 // Trim any trailing block space off the end of a used block, return to pool.
+@(private)
 block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
 	assert(!block_is_free(block), "Block must be used")
 	if (block_can_split(block, size)) {
@@ -482,7 +691,7 @@ block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
 }
 
 // Trim leading block space, return to pool.
-@(require_results)
+@(private, require_results)
 block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
 	remaining = block
 	if block_can_split(block, size) {
@@ -496,7 +705,7 @@ block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size:
 	return remaining
 }
 
-@(require_results)
+@(private, require_results)
 block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) {
 	fl, sl: i32
 	if size != 0 {
@@ -520,7 +729,7 @@ block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Hea
 	return block
 }
 
-@(require_results)
+@(private, require_results)
 block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) {
 	if block != nil {
 		assert(size != 0, "Size must be non-zero")
@@ -529,209 +738,4 @@ block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint
 		res = ([^]byte)(block_to_ptr(block))[:size]
 	}
 	return
-}
-
-// Clear control structure and point all empty lists at the null block
-clear :: proc(control: ^Allocator) {
-	control.block_null.next_free = &control.block_null
-	control.block_null.prev_free = &control.block_null
-
-	control.fl_bitmap = 0
-	for i in 0..<FL_INDEX_COUNT {
-		control.sl_bitmap[i] = 0
-		for j in 0..<SL_INDEX_COUNT {
-			control.blocks[i][j] = &control.block_null
-		}
-	}
-}
-
-@(require_results)
-pool_add :: proc(control: ^Allocator, pool: []u8) -> (err: Error) {
-	assert(uintptr(raw_data(pool)) % ALIGN_SIZE == 0, "Added memory must be aligned")
-
-	pool_overhead := POOL_OVERHEAD
-	pool_bytes := align_down(len(pool) - pool_overhead, ALIGN_SIZE)
-
-	if pool_bytes < BLOCK_SIZE_MIN {
-		return .Backing_Buffer_Too_Small
-	} else if pool_bytes > BLOCK_SIZE_MAX {
-		return .Backing_Buffer_Too_Large
-	}
-
-	// Create the main free block. Offset the start of the block slightly,
-	// so that the `prev_phys_block` field falls outside of the pool -
-	// it will never be used.
-	block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
-
-	block_set_size(block, pool_bytes)
-	block_set_free(block)
-	block_set_prev_used(block)
-	block_insert(control, block)
-
-	// Split the block to create a zero-size sentinel block
-	next := block_link_next(block)
-	block_set_size(next, 0)
-	block_set_used(next)
-	block_set_prev_free(next)
-	return
-}
-
-pool_remove :: proc(control: ^Allocator, pool: []u8) {
-	block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
-
-	assert(block_is_free(block),               "Block should be free")
-	assert(!block_is_free(block_next(block)),  "Next block should not be free")
-	assert(block_size(block_next(block)) == 0, "Next block size should be zero")
-
-	fl, sl := mapping_insert(block_size(block))
-	remove_free_block(control, block, fl, sl)
-}
-
-@(require_results)
-alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
-	assert(control != nil)
-	adjust := adjust_request_size(size, ALIGN_SIZE)
-
-	GAP_MINIMUM :: size_of(Block_Header)
-	size_with_gap := adjust_request_size(adjust + align + GAP_MINIMUM, align)
-
-	aligned_size := size_with_gap if adjust != 0 && align > ALIGN_SIZE else adjust
-	if aligned_size == 0 && size > 0 {
-		return nil, .Out_Of_Memory
-	}
-
-	block  := block_locate_free(control, aligned_size)
-	if block == nil {
-		return nil, .Out_Of_Memory
-	}
-	ptr := block_to_ptr(block)
-	aligned := align_ptr(ptr, align)
-	gap := uint(int(uintptr(aligned)) - int(uintptr(ptr)))
-
-	if gap != 0 && gap < GAP_MINIMUM {
-		gap_remain := GAP_MINIMUM - gap
-		offset := uintptr(max(gap_remain, align))
-		next_aligned := rawptr(uintptr(aligned) + offset)
-
-		aligned = align_ptr(next_aligned, align)
-
-		gap = uint(int(uintptr(aligned)) - int(uintptr(ptr)))
-	}
-
-	if gap != 0 {
-		assert(gap >= GAP_MINIMUM, "gap size too small")
-		block = block_trim_free_leading(control, block, gap)
-	}
-
-	return block_prepare_used(control, block, adjust)
-}
-
-@(require_results)
-alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
-	res, err = alloc_bytes_non_zeroed(control, size, align)
-	if err == nil {
-		intrinsics.mem_zero(raw_data(res), len(res))
-	}
-	return
-}
-
-
-free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
-	assert(control != nil)
-	// `size` is currently ignored
-	if ptr == nil {
-		return
-	}
-
-	block := block_from_ptr(ptr)
-	assert(!block_is_free(block), "block already marked as free") // double free
-	block_mark_as_free(block)
-	block = block_merge_prev(control, block)
-	block = block_merge_next(control, block)
-	block_insert(control, block)
-}
-
-
-@(require_results)
-resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
-	assert(control != nil)
-	if ptr != nil && new_size == 0 {
-		free_with_size(control, ptr, old_size)
-		return
-	} else if ptr == nil {
-		return alloc_bytes(control, new_size, alignment)
-	}
-
-	block := block_from_ptr(ptr)
-	next := block_next(block)
-
-	curr_size := block_size(block)
-	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
-	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
-
-	assert(!block_is_free(block), "block already marked as free") // double free
-
-	min_size := min(curr_size, new_size, old_size)
-
-	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
-		res = alloc_bytes(control, new_size, alignment) or_return
-		if res != nil {
-			copy(res, ([^]byte)(ptr)[:min_size])
-			free_with_size(control, ptr, curr_size)
-		}
-		return
-	}
-	if adjust > curr_size {
-		_ = block_merge_next(control, block)
-		block_mark_as_used(block)
-	}
-
-	block_trim_used(control, block, adjust)
-	res = ([^]byte)(ptr)[:new_size]
-
-	if min_size < new_size {
-		to_zero := ([^]byte)(ptr)[min_size:new_size]
-		runtime.mem_zero(raw_data(to_zero), len(to_zero))
-	}
-	return
-}
-
-@(require_results)
-resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
-	assert(control != nil)
-	if ptr != nil && new_size == 0 {
-		free_with_size(control, ptr, old_size)
-		return
-	} else if ptr == nil {
-		return alloc_bytes_non_zeroed(control, new_size, alignment)
-	}
-
-	block := block_from_ptr(ptr)
-	next := block_next(block)
-
-	curr_size := block_size(block)
-	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
-	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
-
-	assert(!block_is_free(block), "block already marked as free") // double free
-
-	min_size := min(curr_size, new_size, old_size)
-
-	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
-		res = alloc_bytes_non_zeroed(control, new_size, alignment) or_return
-		if res != nil {
-			copy(res, ([^]byte)(ptr)[:min_size])
-			free_with_size(control, ptr, old_size)
-		}
-		return
-	}
-
-	if adjust > curr_size {
-		_ = block_merge_next(control, block)
-		block_mark_as_used(block)
-	}
-
-	block_trim_used(control, block, adjust)
-	res = ([^]byte)(ptr)[:new_size]
-	return
-}
+}
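
Since `ffs`, `fls`, and `fls_uint` are now exported for testing, their
semantics are easy to demonstrate. A small sketch, assuming a 64-bit target
for the `fls_uint` line:

package tlsf_bitscan_demo

import "core:fmt"
import "core:mem/tlsf"

main :: proc() {
	fmt.println(tlsf.ffs(0))            // -1: no bit set
	fmt.println(tlsf.ffs(0b1000))       //  3: index of the lowest set bit
	fmt.println(tlsf.fls(0))            // -1: 31 - 32 wraps to -1 after the cast
	fmt.println(tlsf.fls(0x8000_0000))  // 31: index of the highest set bit
	fmt.println(tlsf.fls_uint(1 << 40)) // 40: uint is 64 bits wide here
}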

tests/core/mem/test_core_mem.odin (+102 -0)

@@ -1,8 +1,10 @@
 package test_core_mem
 
+import "core:mem"
 import "core:mem/tlsf"
 import "core:mem/virtual"
 import "core:testing"
+import "core:slice"
 
 @test
 test_tlsf_bitscan :: proc(t: ^testing.T) {
@@ -54,3 +56,103 @@ test_align_bumping_block_limit :: proc(t: ^testing.T) {
 	testing.expect_value(t, err, nil)
 	testing.expect(t, len(data) == 896)
 }
+
+@(test)
+tlsf_test_overlap_and_zero :: proc(t: ^testing.T) {
+	default_allocator := context.allocator
+	alloc: tlsf.Allocator
+	defer tlsf.destroy(&alloc)
+
+	NUM_ALLOCATIONS :: 1_000
+	BACKING_SIZE    :: NUM_ALLOCATIONS * (1_000 + size_of(uintptr))
+
+	if err := tlsf.init_from_allocator(&alloc, default_allocator, BACKING_SIZE); err != .None {
+		testing.fail_now(t, "TLSF init error")
+	}
+	context.allocator = tlsf.allocator(&alloc)
+
+	allocations := make([dynamic][]byte, 0, NUM_ALLOCATIONS, default_allocator)
+	defer delete(allocations)
+
+	err: mem.Allocator_Error
+	s:   []byte
+
+	for size := 1; err == .None && size <= NUM_ALLOCATIONS; size += 1 {
+		s, err = make([]byte, size)
+		append(&allocations, s)
+	}
+
+	slice.sort_by(allocations[:], proc(a, b: []byte) -> bool {
+		return uintptr(raw_data(a)) < uintptr(raw_data(b))
+	})
+
+	for i in 0..<len(allocations) - 1 {
+		fail_if_allocations_overlap(t, allocations[i], allocations[i + 1])
+		fail_if_not_zeroed(t, allocations[i])
+	}
+}
+
+@(test)
+tlsf_test_free_all :: proc(t: ^testing.T) {
+	default_allocator := context.allocator
+	alloc: tlsf.Allocator
+	defer tlsf.destroy(&alloc)
+
+	NUM_ALLOCATIONS :: 10
+	ALLOCATION_SIZE :: mem.Megabyte
+	BACKING_SIZE    :: NUM_ALLOCATIONS * (ALLOCATION_SIZE + size_of(uintptr))
+
+	if init_err := tlsf.init_from_allocator(&alloc, default_allocator, BACKING_SIZE); init_err != .None {
+		testing.fail_now(t, "TLSF init error")
+	}
+	context.allocator = tlsf.allocator(&alloc)
+
+	allocations: [2][dynamic][]byte
+	allocations[0] = make([dynamic][]byte, 0, NUM_ALLOCATIONS, default_allocator) // After `init`
+	allocations[1] = make([dynamic][]byte, 0, NUM_ALLOCATIONS, default_allocator) // After `free_all`
+	defer {
+		delete(allocations[0])
+		delete(allocations[1])
+	}
+
+	for {
+		s := make([]byte, ALLOCATION_SIZE) or_break
+		append(&allocations[0], s)
+	}
+	testing.expect(t, len(allocations[0]) >= 10)
+
+	free_all(tlsf.allocator(&alloc))
+
+	for {
+		s := make([]byte, ALLOCATION_SIZE) or_break
+		append(&allocations[1], s)
+	}
+	testing.expect(t, len(allocations[1]) >= 10)
+
+	for i in 0..<len(allocations[0]) {
+		s0, s1 := allocations[0][i], allocations[1][i]
+		assert(raw_data(s0) == raw_data(s1))
+		assert(len(s0)      == len(s1))
+	}
+}
+
+fail_if_not_zeroed :: proc(t: ^testing.T, a: []byte) {
+	for b in a {
+		if b != 0 {
+			testing.fail_now(t, "Allocation wasn't zeroed")
+		}
+	}
+}
+
+fail_if_allocations_overlap :: proc(t: ^testing.T, a, b: []byte) {
+	a_start := uintptr(raw_data(a))
+	a_end   := a_start + uintptr(len(a))
+	b_start := uintptr(raw_data(b))
+	b_end   := b_start + uintptr(len(b))
+
+	// Half-open intervals [start, end) overlap iff each starts before the other ends.
+	if a_start < b_end && b_start < a_end {
+		testing.fail_now(t, "Allocations overlapped")
+	}
+}