
Improve default temp_allocator; make nil loggers do nothing; improve mem.Scratch_Allocator behaviour

gingerBill committed 4 years ago · commit 6eeb12a986

4 changed files with 195 additions and 130 deletions
  1. core/log/log.odin (+6 −0)
  2. core/mem/allocators.odin (+99 −60)
  3. core/runtime/core.odin (+2 −2)
  4. core/runtime/default_allocators.odin (+88 −68)

core/log/log.odin (+6 −0)

@@ -123,6 +123,9 @@ panicf :: proc(fmt_str: string, args: ..any, location := #caller_location) -> !
 
 log :: proc(level: Level, args: ..any, sep := " ", location := #caller_location) {
 	logger := context.logger;
+	if logger.procedure == nil {
+		return;
+	}
 	if level < logger.lowest_level {
 		return;
 	}
@@ -132,6 +135,9 @@ log :: proc(level: Level, args: ..any, sep := " ", location := #caller_location)
 
 logf :: proc(level: Level, fmt_str: string, args: ..any, location := #caller_location) {
 	logger := context.logger;
+	if logger.procedure == nil {
+		return;
+	}
 	if level < logger.lowest_level {
 		return;
 	}

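With this change, a zero-value logger (the default for a fresh context) is a safe no-op instead of a call through a nil procedure pointer. A minimal sketch of the effect, using only the log and logf procedures touched above (the package/main scaffolding is illustrative, not part of the commit):

	package example

	import "core:log"

	main :: proc() {
		// context.logger is zero-initialized, so logger.procedure == nil here.
		// Both calls now return immediately rather than invoking a nil procedure.
		log.log(.Info, "silently dropped");
		log.logf(.Warning, "also dropped: %d", 42);
	}
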
core/mem/allocators.odin (+99 −60)

@@ -107,106 +107,143 @@ end_arena_temp_memory :: proc(using tmp: Arena_Temp_Memory) {
 
 
 Scratch_Allocator :: struct {
-	data:     []byte,
-	curr_offset: int,
-	prev_offset: int,
-	backup_allocator: Allocator,
+	data:               []byte,
+	curr_offset:        int,
+	prev_allocation:    rawptr,
+	backup_allocator:   Allocator,
 	leaked_allocations: [dynamic]rawptr,
-	default_to_default_allocator: bool,
 }
 
-scratch_allocator_init :: proc(scratch: ^Scratch_Allocator, data: []byte, backup_allocator := context.allocator) {
-	scratch.data = data;
-	scratch.curr_offset = 0;
-	scratch.prev_offset = 0;
-	scratch.backup_allocator = backup_allocator;
+scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) {
+	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator);
+	s.curr_offset = 0;
+	s.prev_allocation = nil;
+	s.backup_allocator = backup_allocator;
+	s.leaked_allocations.allocator = backup_allocator;
 }
 
-scratch_allocator_destroy :: proc(using scratch: ^Scratch_Allocator) {
-	if scratch == nil {
+scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
+	if s == nil {
 		return;
 	}
-	for ptr in leaked_allocations {
-		free(ptr, backup_allocator);
+	for ptr in s.leaked_allocations {
+		free(ptr, s.backup_allocator);
 	}
-	delete(leaked_allocations);
-	delete(data, backup_allocator);
-	scratch^ = {};
+	delete(s.leaked_allocations);
+	delete(s.data, s.backup_allocator);
+	s^ = {};
 }
 
 scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
                                old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
 
-	scratch := (^Scratch_Allocator)(allocator_data);
+	s := (^Scratch_Allocator)(allocator_data);
 
-	if scratch.data == nil {
-		DEFAULT_SCRATCH_BACKING_SIZE :: 1<<22;
+	if s.data == nil {
+		DEFAULT_BACKING_SIZE :: 1<<22;
 		if !(context.allocator.procedure != scratch_allocator_proc &&
 		     context.allocator.data != allocator_data) {
 			panic("cyclic initialization of the scratch allocator with itself");
 		}
-		scratch_allocator_init(scratch, make([]byte, 1<<22));
+		scratch_allocator_init(s, DEFAULT_BACKING_SIZE);
 	}
 
+	size := size;
+
 	switch mode {
 	case .Alloc:
+		size = align_forward_int(size, alignment);
+
 		switch {
-		case scratch.curr_offset+size <= len(scratch.data):
-			offset := align_forward_uintptr(uintptr(scratch.curr_offset), uintptr(alignment));
-			ptr := &scratch.data[offset];
-			zero(ptr, size);
-			scratch.prev_offset = int(offset);
-			scratch.curr_offset = int(offset) + size;
-			return ptr;
-		case size <= len(scratch.data):
-			offset := align_forward_uintptr(uintptr(0), uintptr(alignment));
-			ptr := &scratch.data[offset];
-			zero(ptr, size);
-			scratch.prev_offset = int(offset);
-			scratch.curr_offset = int(offset) + size;
-			return ptr;
-		}
-		// TODO(bill): Should leaks be notified about? Should probably use a logging system that is built into the context system
-		a := scratch.backup_allocator;
+		case s.curr_offset+size <= len(s.data):
+			start := uintptr(raw_data(s.data));
+			ptr := start + uintptr(s.curr_offset);
+			ptr = align_forward_uintptr(ptr, uintptr(alignment));
+			zero(rawptr(ptr), size);
+
+			s.prev_allocation = rawptr(ptr);
+			offset := int(ptr - start);
+			s.curr_offset = offset + size;
+			return rawptr(ptr);
+
+		case size <= len(s.data):
+			start := uintptr(raw_data(s.data));
+			ptr := align_forward_uintptr(start, uintptr(alignment));
+			zero(rawptr(ptr), size);
+
+			s.prev_allocation = rawptr(ptr);
+			offset := int(ptr - start);
+			s.curr_offset = offset + size;
+			return rawptr(ptr);
+		}
+		a := s.backup_allocator;
 		if a.procedure == nil {
 			a = context.allocator;
-			scratch.backup_allocator = a;
+			s.backup_allocator = a;
 		}
 
 		ptr := alloc(size, alignment, a, loc);
-		if scratch.leaked_allocations == nil {
-			scratch.leaked_allocations = make([dynamic]rawptr, a);
+		if s.leaked_allocations == nil {
+			s.leaked_allocations = make([dynamic]rawptr, a);
+		}
+		append(&s.leaked_allocations, ptr);
+
+		if logger := context.logger; logger.lowest_level <= .Warning {
+			if logger.procedure != nil {
+			logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc);
+			}
 		}
-		append(&scratch.leaked_allocations, ptr);
 
 		return ptr;
 
 	case .Free:
-		last_ptr := rawptr(&scratch.data[scratch.prev_offset]);
-		if old_memory == last_ptr {
-			full_size := scratch.curr_offset - scratch.prev_offset;
-			scratch.curr_offset = scratch.prev_offset;
-			zero(last_ptr, full_size);
+		start := uintptr(raw_data(s.data));
+		end := start + uintptr(len(s.data));
+		old_ptr := uintptr(old_memory);
+
+		if s.prev_allocation == old_memory {
+			s.curr_offset = int(uintptr(s.prev_allocation) - uintptr(start));
+			s.prev_allocation = nil;
+			return nil;
+		}
+
+		if start <= old_ptr && old_ptr < end {
+			// NOTE(bill): Cannot free this pointer but it is valid
 			return nil;
 		}
-		// NOTE(bill): It's scratch memory, don't worry about freeing
+
+		if len(s.leaked_allocations) != 0 {
+			for ptr, i in s.leaked_allocations {
+				if ptr == old_memory {
+					free(ptr, s.backup_allocator);
+					ordered_remove(&s.leaked_allocations, i);
+					return nil;
+				}
+			}
+		}
+		panic("invalid pointer passed to scratch_allocator");
 
 	case .Free_All:
-		scratch.curr_offset = 0;
-		scratch.prev_offset = 0;
-		for ptr in scratch.leaked_allocations {
-			free(ptr, scratch.backup_allocator);
+		s.curr_offset = 0;
+		s.prev_allocation = nil;
+		for ptr in s.leaked_allocations {
+			free(ptr, s.backup_allocator);
 		}
-		clear(&scratch.leaked_allocations);
+		clear(&s.leaked_allocations);
 
 	case .Resize:
-		last_ptr := rawptr(&scratch.data[scratch.prev_offset]);
-		if old_memory == last_ptr && len(scratch.data)-scratch.prev_offset >= size {
-			scratch.curr_offset = scratch.prev_offset+size;
+		begin := uintptr(raw_data(s.data));
+		end := begin + uintptr(len(s.data));
+		old_ptr := uintptr(old_memory);
+		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
+			s.curr_offset = int(old_ptr-begin)+size;
 			return old_memory;
 		}
-		return scratch_allocator_proc(allocator_data, Allocator_Mode.Alloc, size, alignment, old_memory, old_size, flags, loc);
+		ptr := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, flags, loc);
+		copy(ptr, old_memory, old_size);
+		scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, flags, loc);
+		return ptr;
 
 	case .Query_Features:
 		set := (^Allocator_Mode_Set)(old_memory);
@@ -219,19 +256,21 @@ scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
 		return nil;
 	}
 
+
 	return nil;
 }
 
-scratch_allocator :: proc(scratch: ^Scratch_Allocator) -> Allocator {
+scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
 	return Allocator{
 		procedure = scratch_allocator_proc,
-		data = scratch,
+		data = allocator,
 	};
 }
 
 
 
 
+
 Stack_Allocation_Header :: struct {
 	prev_offset: int,
 	padding:     int,
@@ -941,7 +980,7 @@ small_allocator :: proc(s: ^$S/Small_Allocator, backing := context.allocator) ->
 
 			p := rawptr(s.curr);
 			s.curr += uintptr(size);
-			return p;
+			return zero(p, size);
 
 		case .Free:
 			// NOP

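The scratch allocator's init now takes a byte count and allocates its own aligned backing buffer, and freeing the most recent allocation actually rewinds the offset. A usage sketch under those assumptions (the 1<<20 size is arbitrary):

	package example

	import "core:mem"

	main :: proc() {
		s: mem.Scratch_Allocator;
		mem.scratch_allocator_init(&s, 1<<20); // backing buffer is now allocated for you
		defer mem.scratch_allocator_destroy(&s);

		context.allocator = mem.scratch_allocator(&s);

		buf := make([]byte, 128);    // served from the scratch buffer
		free(raw_data(buf));         // most recent allocation: rewinds curr_offset
		free_all(context.allocator); // resets the buffer and frees any leaked backup allocations
	}
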
core/runtime/core.odin (+2 −2)

@@ -525,8 +525,8 @@ __init_context :: proc "contextless" (c: ^Context) {
 }
 
 @builtin
-init_global_temporary_allocator :: proc(data: []byte, backup_allocator := context.allocator) {
-	default_temp_allocator_init(&global_default_temp_allocator_data, data, backup_allocator);
+init_global_temporary_allocator :: proc(size: int, backup_allocator := context.allocator) {
+	default_temp_allocator_init(&global_default_temp_allocator_data, size, backup_allocator);
 }
 
 
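Call sites of the builtin change accordingly: instead of constructing a buffer up front, you pass the desired capacity. A sketch (the 4<<20 size is arbitrary):

	package example

	main :: proc() {
		// Before this commit: init_global_temporary_allocator(make([]byte, 4<<20));
		// Now the allocator creates its own aligned backing buffer.
		init_global_temporary_allocator(4<<20);
	}
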

core/runtime/default_allocators.odin (+88 −68)

@@ -27,127 +27,147 @@ when ODIN_DEFAULT_TO_NIL_ALLOCATOR || ODIN_OS == "freestanding" {
 }
 
 
+DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE: int : #config(DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE, 1<<22);
+
 
 Default_Temp_Allocator :: struct {
-	data:     []byte,
-	curr_offset: int,
-	prev_offset: int,
-	backup_allocator: Allocator,
+	data:               []byte,
+	curr_offset:        int,
+	prev_allocation:    rawptr,
+	backup_allocator:   Allocator,
 	leaked_allocations: [dynamic]rawptr,
 }
 
-default_temp_allocator_init :: proc(allocator: ^Default_Temp_Allocator, data: []byte, backup_allocator := context.allocator) {
-	allocator.data = data;
-	allocator.curr_offset = 0;
-	allocator.prev_offset = 0;
-	allocator.backup_allocator = backup_allocator;
-	allocator.leaked_allocations.allocator = backup_allocator;
+default_temp_allocator_init :: proc(s: ^Default_Temp_Allocator, size: int, backup_allocator := context.allocator) {
+	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator);
+	s.curr_offset = 0;
+	s.prev_allocation = nil;
+	s.backup_allocator = backup_allocator;
+	s.leaked_allocations.allocator = backup_allocator;
 }
 
-default_temp_allocator_destroy :: proc(using allocator: ^Default_Temp_Allocator) {
-	if allocator == nil {
+default_temp_allocator_destroy :: proc(s: ^Default_Temp_Allocator) {
+	if s == nil {
 		return;
 	}
-	for ptr in leaked_allocations {
-		free(ptr, backup_allocator);
+	for ptr in s.leaked_allocations {
+		free(ptr, s.backup_allocator);
 	}
-	delete(leaked_allocations);
-	delete(data, backup_allocator);
-	allocator^ = {};
+	delete(s.leaked_allocations);
+	delete(s.data, s.backup_allocator);
+	s^ = {};
 }
 
 default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                     size, alignment: int,
                                     old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
 
-	allocator := (^Default_Temp_Allocator)(allocator_data);
+	s := (^Default_Temp_Allocator)(allocator_data);
 
-	if allocator.data == nil {
-		DEFAULT_SCRATCH_BACKING_SIZE :: 1<<22;
+	if s.data == nil {
 		a := context.allocator;
 		if !(context.allocator.procedure != default_temp_allocator_proc &&
 		     context.allocator.data != allocator_data) {
 			a = default_allocator();
 		}
-		default_temp_allocator_init(allocator, make([]byte, DEFAULT_SCRATCH_BACKING_SIZE, a), a);
+		default_temp_allocator_init(s, DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE, a);
 	}
 
+	size := size;
+
 	switch mode {
 	case .Alloc:
+		size = align_forward_int(size, alignment);
+
 		switch {
-		case allocator.curr_offset+size <= len(allocator.data):
-			offset := align_forward_uintptr(uintptr(allocator.curr_offset), uintptr(alignment));
-			ptr := &allocator.data[offset];
-			mem_zero(ptr, size);
-			allocator.prev_offset = int(offset);
-			allocator.curr_offset = int(offset) + size;
-			return ptr;
-		case size <= len(allocator.data):
-			offset := align_forward_uintptr(uintptr(0), uintptr(alignment));
-			ptr := &allocator.data[offset];
-			mem_zero(ptr, size);
-			allocator.prev_offset = int(offset);
-			allocator.curr_offset = int(offset) + size;
-			return ptr;
+		case s.curr_offset+size <= len(s.data):
+			start := uintptr(raw_data(s.data));
+			ptr := start + uintptr(s.curr_offset);
+			ptr = align_forward_uintptr(ptr, uintptr(alignment));
+			mem_zero(rawptr(ptr), size);
+
+			s.prev_allocation = rawptr(ptr);
+			offset := int(ptr - start);
+			s.curr_offset = offset + size;
+			return rawptr(ptr);
+
+		case size <= len(s.data):
+			start := uintptr(raw_data(s.data));
+			ptr := align_forward_uintptr(start, uintptr(alignment));
+			mem_zero(rawptr(ptr), size);
+
+			s.prev_allocation = rawptr(ptr);
+			offset := int(ptr - start);
+			s.curr_offset = offset + size;
+			return rawptr(ptr);
 		}
-		// TODO(bill): Should leaks be notified about? Should probably use a logging system that is built into the context system
-		a := allocator.backup_allocator;
+		a := s.backup_allocator;
 		if a.procedure == nil {
 			a = context.allocator;
-			allocator.backup_allocator = a;
+			s.backup_allocator = a;
 		}
 
 		ptr := mem_alloc(size, alignment, a, loc);
-		if allocator.leaked_allocations == nil {
-			allocator.leaked_allocations = make([dynamic]rawptr, a);
+		if s.leaked_allocations == nil {
+			s.leaked_allocations = make([dynamic]rawptr, a);
+		}
+		append(&s.leaked_allocations, ptr);
+
+		// TODO(bill): Should leaks be notified about?
+		if logger := context.logger; logger.lowest_level <= .Warning {
+			if logger.procedure != nil {
+				logger.procedure(logger.data, .Warning, "default temp allocator resorted to backup_allocator", logger.options, loc);
+			}
 		}
-		append(&allocator.leaked_allocations, ptr);
 
 		return ptr;
 
 	case .Free:
-		if len(allocator.data) == 0 {
+		start := uintptr(raw_data(s.data));
+		end := start + uintptr(len(s.data));
+		old_ptr := uintptr(old_memory);
+
+		if s.prev_allocation == old_memory {
+			s.curr_offset = int(uintptr(s.prev_allocation) - uintptr(start));
+			s.prev_allocation = nil;
 			return nil;
 		}
-		last_ptr := rawptr(&allocator.data[allocator.prev_offset]);
-		if old_memory == last_ptr {
-			allocator.curr_offset = allocator.prev_offset;
+
+		if start <= old_ptr && old_ptr < end {
+			// NOTE(bill): Cannot free this pointer but it is valid
 			return nil;
-		} else {
-			#no_bounds_check start, end := &allocator.data[0], &allocator.data[allocator.curr_offset];
-			if start <= old_memory && old_memory < end {
-				// NOTE(bill): Cannot free this pointer
-				return nil;
-			}
+		}
 
-			if len(allocator.leaked_allocations) != 0 {
-				for ptr, i in allocator.leaked_allocations {
-					if ptr == old_memory {
-						free(ptr, allocator.backup_allocator);
-						ordered_remove(&allocator.leaked_allocations, i);
-						return nil;
-					}
+		if len(s.leaked_allocations) != 0 {
+			for ptr, i in s.leaked_allocations {
+				if ptr == old_memory {
+					free(ptr, s.backup_allocator);
+					ordered_remove(&s.leaked_allocations, i);
+					return nil;
 				}
 			}
 		}
-		// NOTE(bill): It's a temporary memory, don't worry about freeing
+		panic("invalid pointer passed to default_temp_allocator");
 
 	case .Free_All:
-		allocator.curr_offset = 0;
-		allocator.prev_offset = 0;
-		for ptr in allocator.leaked_allocations {
-			free(ptr, allocator.backup_allocator);
+		s.curr_offset = 0;
+		s.prev_allocation = nil;
+		for ptr in s.leaked_allocations {
+			free(ptr, s.backup_allocator);
 		}
-		clear(&allocator.leaked_allocations);
+		clear(&s.leaked_allocations);
 
 	case .Resize:
-		last_ptr := #no_bounds_check rawptr(&allocator.data[allocator.prev_offset]);
-		if old_memory == last_ptr && len(allocator.data)-allocator.prev_offset >= size {
-			allocator.curr_offset = allocator.prev_offset+size;
+		begin := uintptr(raw_data(s.data));
+		end := begin + uintptr(len(s.data));
+		old_ptr := uintptr(old_memory);
+		if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
+			s.curr_offset = int(old_ptr-begin)+size;
 			return old_memory;
 		}
-		ptr := default_temp_allocator_proc(allocator_data, Allocator_Mode.Alloc, size, alignment, old_memory, old_size, flags, loc);
+		ptr := default_temp_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, flags, loc);
 		mem_copy(ptr, old_memory, old_size);
+		default_temp_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, flags, loc);
 		return ptr;
 
 	case .Query_Features:
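
The intended pattern is unchanged: allocate freely from context.temp_allocator during a frame or task, then reset it in one go. A sketch (the loop and sizes are illustrative):

	package example

	main :: proc() {
		for i := 0; i < 3; i += 1 {
			data := make([]byte, 256, context.temp_allocator);
			_ = data;
			// Resets curr_offset/prev_allocation and frees any allocations
			// that spilled over to the backup allocator.
			free_all(context.temp_allocator);
		}
	}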