
Merge pull request #5037 from Kelimion/tlsf

Allow `core:mem/tlsf` to automatically add new pools.
Jeroen van Rijn · 4 months ago · commit 11af4cebb7
3 changed files with 95 additions and 7 deletions:

  1. core/mem/tlsf/tlsf.odin (+20 −5)
  2. core/mem/tlsf/tlsf_internal.odin (+34 −1)
  3. tests/core/mem/test_core_mem.odin (+41 −1)

core/mem/tlsf/tlsf.odin (+20 −5)

@@ -22,7 +22,6 @@ Error :: enum byte {
 	Backing_Allocator_Error   = 5,
 }
 
-
 Allocator :: struct {
 	// Empty lists point at this block to indicate they are free.
 	block_null: Block_Header,
@@ -44,7 +43,6 @@ Allocator :: struct {
 	// If we're expected to grow when we run out of memory,
 	// how much should we ask the backing allocator for?
 	new_pool_size: uint,
-
 }
 #assert(size_of(Allocator) % ALIGN_SIZE == 0)
 
@@ -56,6 +54,21 @@ allocator :: proc(t: ^Allocator) -> runtime.Allocator {
 	}
 }
 
+// Tries to estimate a pool size sufficient for `count` allocations, each of `size` bytes at the given `alignment`.
+estimate_pool_from_size_alignment :: proc(count: int, size: int, alignment: int) -> (pool_size: int) {
+	per_allocation := align_up(uint(size + alignment) + BLOCK_HEADER_OVERHEAD, ALIGN_SIZE)
+	return count * int(per_allocation) + int(INITIAL_POOL_OVERHEAD)
+}
+
+// Tries to estimate a pool size sufficient for `count` allocations of `type`.
+estimate_pool_from_typeid :: proc(count: int, type: typeid) -> (pool_size: int) {
+	ti := type_info_of(type)
+	return estimate_pool_size(count, ti.size, ti.align)
+}
+
+estimate_pool_size :: proc{estimate_pool_from_size_alignment, estimate_pool_from_typeid}
+
+
 @(require_results)
 init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
 	assert(control != nil)
@@ -63,7 +76,7 @@ init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
 		return .Invalid_Alignment
 	}
 
-	pool_bytes := align_down(len(buf), ALIGN_SIZE) - INITIAL_POOL_OVERHEAD
+	pool_bytes := align_down(len(buf) - INITIAL_POOL_OVERHEAD, ALIGN_SIZE)
 	if pool_bytes < BLOCK_SIZE_MIN {
 		return .Backing_Buffer_Too_Small
 	} else if pool_bytes > BLOCK_SIZE_MAX {
@@ -79,9 +92,9 @@ init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
 }
 
 @(require_results)
-init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int) -> Error {
+init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int, new_pool_size := 0) -> Error {
 	assert(control != nil)
-	pool_bytes := align_up(uint(initial_pool_size), ALIGN_SIZE) + INITIAL_POOL_OVERHEAD
+	pool_bytes := uint(estimate_pool_size(1, initial_pool_size, ALIGN_SIZE))
 	if pool_bytes < BLOCK_SIZE_MIN {
 		return .Backing_Buffer_Too_Small
 	} else if pool_bytes > BLOCK_SIZE_MAX {
@@ -98,6 +111,8 @@ init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, ini
 		allocator = backing,
 	}
 
+	control.new_pool_size = uint(new_pool_size)
+
 	// TODO(Jeroen): Add automatically growing the pools from the backing allocator
 
 	return free_all(control)
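
A quick sketch of the new sizing helpers, using a hypothetical `Entity` struct: both calls dispatch through the `estimate_pool_size` proc group and should agree, because the `typeid` overload simply forwards the type's size and alignment.

package tlsf_estimate_example

import "core:fmt"
import "core:mem/tlsf"

// Hypothetical payload type, for illustration only.
Entity :: struct {
	position: [3]f32,
	id:       u64,
}

main :: proc() {
	// Explicit size/alignment overload of the proc group.
	a := tlsf.estimate_pool_size(1024, size_of(Entity), align_of(Entity))

	// `typeid` overload; forwards type_info_of(Entity).size and .align.
	b := tlsf.estimate_pool_size(1024, typeid_of(Entity))

	assert(a == b)
	fmt.println("estimated pool size for 1024 entities:", a, "bytes")
}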

core/mem/tlsf/tlsf_internal.odin (+34 −1)

@@ -185,7 +185,40 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) ->
 
 	block := block_locate_free(control, aligned_size)
 	if block == nil {
-		return nil, .Out_Of_Memory
+		// OOM: Couldn't find block of `aligned_size` bytes.
+		if control.new_pool_size > 0 && control.pool.allocator.procedure != nil {
+			// TLSF is configured to grow. Trying to allocate a new pool of `control.new_pool_size` bytes.
+
+			new_pool_buf := runtime.make_aligned([]byte, control.new_pool_size, ALIGN_SIZE, control.pool.allocator) or_return
+
+			// Add new pool to control structure
+			if pool_add_err := pool_add(control, new_pool_buf); pool_add_err != .None {
+				delete(new_pool_buf, control.pool.allocator)
+				return nil, .Out_Of_Memory
+			}
+
+			// Allocate a new link in the `control.pool` tracking structure.
+			new_pool := new_clone(Pool{
+				data      = new_pool_buf,
+				allocator = control.pool.allocator,
+				next      = nil,
+			}, control.pool.allocator) or_return
+
+			p := &control.pool
+			for p.next != nil {
+				p = p.next
+			}
+			p.next = new_pool
+
+			// Try again to find free block
+			block = block_locate_free(control, aligned_size)
+			if block == nil {
+				return nil, .Out_Of_Memory
+			}
+		} else {
+			// TLSF is non-growing. We're done.
+			return nil, .Out_Of_Memory
+		}
 	}
 	ptr := block_to_ptr(block)
 	aligned := align_ptr(ptr, align)
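
The new branch only runs when `control.new_pool_size > 0` and the pool has a backing allocator; otherwise the pre-existing `.Out_Of_Memory` behavior is unchanged. A minimal sketch of the two modes (pool sizes hypothetical):

package tlsf_grow_modes

import "core:fmt"
import "core:mem"
import "core:mem/tlsf"

main :: proc() {
	// Non-growing: `new_pool_size` defaults to 0, so exhaustion fails as before.
	fixed: tlsf.Allocator
	defer tlsf.destroy(&fixed)
	if err := tlsf.init_from_allocator(&fixed, context.allocator, mem.Megabyte); err != .None {
		return
	}
	{
		context.allocator = tlsf.allocator(&fixed)
		_, alloc_err := make([]byte, 2 * mem.Megabyte)
		fmt.println(alloc_err) // .Out_Of_Memory: no free block, and growing is not configured
	}

	// Growing: on exhaustion, a fresh 4 MiB pool is requested from the backing allocator.
	growing: tlsf.Allocator
	defer tlsf.destroy(&growing)
	if err := tlsf.init_from_allocator(&growing, context.allocator, mem.Megabyte, 4 * mem.Megabyte); err != .None {
		return
	}
	{
		context.allocator = tlsf.allocator(&growing)
		buf, alloc_err := make([]byte, 2 * mem.Megabyte)
		fmt.println(len(buf), alloc_err) // 2097152 .None: served from the freshly added pool
	}
}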

tests/core/mem/test_core_mem.odin (+41 −1)

@@ -82,7 +82,47 @@ tlsf_test_overlap_and_zero :: proc(t: ^testing.T) {
 		append(&allocations, s)
 	}
 
-	slice.sort_by(allocations[:len(allocations)], proc(a, b: []byte) -> bool {
+	slice.sort_by(allocations[:], proc(a, b: []byte) -> bool {
+		return uintptr(raw_data(a)) < uintptr(raw_data((b)))
+	})
+
+	for i in 0..<len(allocations) - 1 {
+		fail_if_allocations_overlap(t, allocations[i], allocations[i + 1])
+		fail_if_not_zeroed(t, allocations[i])
+	}
+}
+
+@(test)
+tlsf_test_grow_pools :: proc(t: ^testing.T) {
+	default_allocator := context.allocator
+	alloc: tlsf.Allocator
+	defer tlsf.destroy(&alloc)
+
+	NUM_ALLOCATIONS    :: 10
+	ALLOC_SIZE         :: mem.Megabyte
+	BACKING_SIZE_INIT  := tlsf.estimate_pool_size(1, ALLOC_SIZE, 64)
+	BACKING_SIZE_GROW  := tlsf.estimate_pool_size(1, ALLOC_SIZE, 64)
+
+	allocations := make([dynamic][]byte, 0, NUM_ALLOCATIONS, default_allocator)
+	defer delete(allocations)
+
+	if err := tlsf.init_from_allocator(&alloc, default_allocator, BACKING_SIZE_INIT, BACKING_SIZE_GROW); err != .None {
+		testing.fail_now(t, "TLSF init error")
+	}
+	context.allocator = tlsf.allocator(&alloc)
+
+	err: mem.Allocator_Error
+	s:   []byte
+
+	for err == .None && len(allocations) < NUM_ALLOCATIONS {
+		s, err = make([]byte, ALLOC_SIZE)
+		testing.expect_value(t, len(s), ALLOC_SIZE)
+		append(&allocations, s)
+	}
+
+	testing.expect_value(t, len(allocations), NUM_ALLOCATIONS)
+
+	slice.sort_by(allocations[:], proc(a, b: []byte) -> bool {
+	return uintptr(raw_data(a)) < uintptr(raw_data((b)))
+	})
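
Worth noting: the test sizes both the initial pool and each grow step for exactly one `ALLOC_SIZE` allocation, so filling all `NUM_ALLOCATIONS` slots forces nine consecutive grow cycles, exercising `pool_add` and the `control.pool` link walk on every overflow.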