
Allow `.allocator` for dynamic arrays; Add `mem.Pool`

gingerBill committed 7 years ago · commit e9e7ce2606
8 changed files with 252 additions and 141 deletions
  1. core/mem/alloc.odin           +159  -1
  2. core/sync/sync_windows.odin    +19  -59
  3. core/sys/win32/windows.odin     +3  -0
  4. examples/demo/demo.odin         +0  -23
  5. src/check_expr.cpp              +3  -0
  6. src/checker.cpp                 +2  -2
  7. src/ir.cpp                      +7  -7
  8. src/types.cpp                  +59  -49

+ 159 - 1
core/mem/alloc.odin

@@ -75,7 +75,6 @@ free :: proc[
 
 
 
-
 default_resize_align :: proc(old_memory: rawptr, old_size, new_size, alignment: int, loc := #caller_location) -> rawptr {
 	if old_memory == nil do return alloc(new_size, alignment, loc);
 
@@ -108,3 +107,162 @@ nil_allocator :: proc() -> Allocator {
 	};
 }
 
+
+
+
+Pool :: struct {
+	block_size:    int,
+	out_band_size: int,
+	alignment:     int,
+
+	unused_blocks:        [dynamic]rawptr,
+	used_blocks:          [dynamic]rawptr,
+	out_band_allocations: [dynamic]rawptr,
+
+	current_block: rawptr,
+	current_pos:   rawptr,
+	bytes_left:    int,
+
+	block_allocator: Allocator,
+}
+
+
+POOL_BLOCK_SIZE_DEFAULT       :: 65536;
+POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554;
+
+
+
+pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
+                            size, alignment: int,
+                            old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
+	pool := (^Pool)(allocator_data);
+
+	switch mode {
+	case Allocator_Mode.Alloc:
+		return pool_alloc(pool, size);
+	case Allocator_Mode.Free:
+		panic("Allocator_Mode.Free is not supported for a pool");
+	case Allocator_Mode.Free_All:
+		pool_free_all(pool);
+	case Allocator_Mode.Resize:
+		panic("Allocator_Mode.Resize is not supported for a pool");
+	}
+	return nil;
+}
+
+
+pool_allocator :: proc(pool: ^Pool) -> Allocator {
+	return Allocator{
+		procedure = pool_allocator_proc,
+		data = pool,
+	};
+}
+
+pool_init :: proc(pool: ^Pool,
+                  block_allocator := Allocator{}, array_allocator := Allocator{},
+                  block_size := POOL_BLOCK_SIZE_DEFAULT, out_band_size := POOL_OUT_OF_BAND_SIZE_DEFAULT,
+                  alignment := 8) {
+	pool.block_size = block_size;
+	pool.out_band_size = out_band_size;
+	pool.alignment = alignment;
+
+	if block_allocator.procedure == nil {
+		block_allocator = context.allocator;
+	}
+	if array_allocator.procedure == nil {
+		array_allocator = context.allocator;
+	}
+
+	pool.block_allocator = block_allocator;
+
+	pool.out_band_allocations.allocator = array_allocator;
+	pool.       unused_blocks.allocator = array_allocator;
+	pool.         used_blocks.allocator = array_allocator;
+}
+
+pool_destroy :: proc(using pool: ^Pool) {
+	pool_free_all(pool);
+	free(unused_blocks);
+	free(used_blocks);
+	free(out_band_allocations); // release the tracking array itself, not just its entries
+
+	zero(pool, size_of(pool^));
+}
+
+
+pool_alloc :: proc(using pool: ^Pool, bytes: int) -> rawptr {
+	cycle_new_block :: proc(using pool: ^Pool) {
+		if block_allocator.procedure == nil {
+			panic("You must call pool_init on a Pool before using it");
+		}
+
+		if current_block != nil {
+			append(&used_blocks, current_block);
+		}
+
+		new_block: rawptr;
+		if len(unused_blocks) > 0 {
+			new_block = pop(&unused_blocks);
+		} else {
+			new_block = block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
+			                                      block_size, alignment,
+			                                      nil, 0);
+		}
+
+		bytes_left = block_size;
+		current_pos = new_block;
+		current_block = new_block;
+	}
+
+
+	extra := (alignment - (bytes % alignment)) % alignment;
+	bytes += extra; // round up to the next multiple of alignment; no padding if already aligned
+	if bytes >= out_band_size {
+		assert(block_allocator.procedure != nil);
+		// Out-of-band requests may exceed block_size, so allocate the exact (padded) size.
+		memory := block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
+		                                    bytes, alignment,
+		                                    nil, 0);
+		if memory != nil {
+			append(&out_band_allocations, memory);
+		}
+		return memory;
+	}
+
+	if bytes_left < bytes {
+		cycle_new_block(pool);
+		if current_block == nil {
+			return nil;
+		}
+	}
+
+	memory := current_pos;
+	current_pos = ptr_offset((^byte)(current_pos), uintptr(bytes));
+	bytes_left -= bytes;
+	return memory;
+}
+
+
+pool_reset :: proc(using pool: ^Pool) {
+	if current_block != nil {
+		append(&unused_blocks, current_block);
+		current_block = nil;
+	}
+
+	for block in used_blocks {
+		append(&unused_blocks, block);
+	}
+	clear(&used_blocks);
+
+	for a in out_band_allocations {
+		free_ptr_with_allocator(block_allocator, a);
+	}
+	clear(&out_band_allocations);
+}
+
+pool_free_all :: proc(using pool: ^Pool) {
+	pool_reset(pool);
+
+	for block in unused_blocks {
+		free_ptr_with_allocator(block_allocator, block);
+	}
+	clear(&unused_blocks);
+}

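For reference, a minimal usage sketch of the new pool API (an editor's illustration, not part of the commit; it assumes only the procedures introduced above plus a `core:mem` import, in the semicolon syntax of this era):

import "core:mem"

pool_example :: proc() {
	pool: mem.Pool;
	mem.pool_init(&pool); // block and array allocators default to context.allocator
	defer mem.pool_destroy(&pool);

	ptr := mem.pool_alloc(&pool, 128); // request is rounded up to pool.alignment
	assert(ptr != nil);

	// Allocator_Mode.Free and .Resize panic for a pool, so reclaim in bulk:
	mem.pool_free_all(&pool);

	// The pool can also be handed around as an ordinary allocator value:
	a := mem.pool_allocator(&pool);
	_ = a;
}
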
+ 19 - 59
core/sync/sync_windows.odin

@@ -7,17 +7,12 @@ Semaphore :: struct {
 	_handle: win32.Handle,
 }
 
-/*
 Mutex :: struct {
-	_semaphore: Semaphore,
-	_counter:   i32,
-	_owner:     i32,
-	_recursion: i32,
+	_critical_section: win32.Critical_Section,
 }
-*/
 
-Mutex :: struct {
-	_critical_section: win32.Critical_Section,
+Condition :: struct {
+	event: win32.Handle,
 }
 
 current_thread_id :: proc() -> i32 {
@@ -41,7 +36,8 @@ semaphore_release :: inline proc(s: ^Semaphore) {
 }
 
 semaphore_wait :: proc(s: ^Semaphore) {
-	win32.wait_for_single_object(s._handle, win32.INFINITE);
+	result := win32.wait_for_single_object(s._handle, win32.INFINITE);
+	assert(result != win32.WAIT_FAILED);
 }
 
 
@@ -66,59 +62,23 @@ mutex_unlock :: proc(m: ^Mutex) {
 }
 
 
-
-/*
-mutex_init :: proc(m: ^Mutex) {
-	atomics.store(&m._counter, 0);
-	atomics.store(&m._owner, current_thread_id());
-	semaphore_init(&m._semaphore);
-	m._recursion = 0;
+condition_init :: proc(using c: ^Condition) {
+	event = win32.create_event_a(nil, false, false, nil);
+	assert(event != nil);
 }
-mutex_destroy :: proc(m: ^Mutex) {
-	semaphore_destroy(&m._semaphore);
-}
-mutex_lock :: proc(m: ^Mutex) {
-	thread_id := current_thread_id();
-	if atomics.fetch_add(&m._counter, 1) > 0 {
-		if thread_id != atomics.load(&m._owner) {
-			semaphore_wait(&m._semaphore);
-		}
-	}
-	atomics.store(&m._owner, thread_id);
-	m._recursion++;
+
+condition_signal :: proc(using c: ^Condition) {
+	ok := win32.set_event(event);
+	assert(bool(ok));
 }
-mutex_try_lock :: proc(m: ^Mutex) -> bool {
-	thread_id := current_thread_id();
-	if atomics.load(&m._owner) == thread_id {
-		atomics.fetch_add(&m._counter, 1);
-	} else {
-		expected: i32 = 0;
-		if atomics.load(&m._counter) != 0 {
-			return false;
-		}
-		if atomics.compare_exchange(&m._counter, expected, 1) == 0 {
-			return false;
-		}
-		atomics.store(&m._owner, thread_id);
-	}
-	m._recursion++;
-	return true;
+
+condition_wait_for :: proc(using c: ^Condition) {
+	result := win32.wait_for_single_object(event, win32.INFINITE);
+	assert(result != win32.WAIT_FAILED);
 }
-mutex_unlock :: proc(m: ^Mutex) {
-	recursion: i32;
-	thread_id := current_thread_id();
-	assert(thread_id == atomics.load(&m._owner));
-
-	m._recursion--;
-	recursion = m._recursion;
-	if recursion == 0 {
-		atomics.store(&m._owner, thread_id);
-	}
 
-	if atomics.fetch_add(&m._counter, -1) > 1 {
-		if recursion == 0 {
-			semaphore_release(&m._semaphore);
-		}
+condition_destroy :: proc(using c: ^Condition) {
+	if event != nil {
+		win32.close_handle(event);
 	}
 }
-*/

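A sketch of how the new `Condition` is meant to be used across two threads (editor's illustration; only `condition_init`, `condition_wait_for`, `condition_signal`, and `condition_destroy` come from this file):

import "core:sync"

cond: sync.Condition;
sync.condition_init(&cond);

// Thread A: block until the event is signalled.
sync.condition_wait_for(&cond);

// Thread B: wake one waiter. The event is created auto-reset
// (manual_reset = false), so each signal releases at most one wait_for.
sync.condition_signal(&cond);

sync.condition_destroy(&cond);
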
+ 3 - 0
core/sys/win32/windows.odin

@@ -701,6 +701,9 @@ foreign kernel32 {
 	@(link_name="LeaveCriticalSection")                  leave_critical_section                     :: proc(critical_section: ^Critical_Section) ---;
 
 	@(link_name="CreateEventA") create_event_a :: proc(event_attributes: ^Security_Attributes, manual_reset, initial_state: Bool, name: cstring) -> Handle ---;
+	@(link_name="PulseEvent")   pulse_event    :: proc(event: Handle) -> Bool ---;
+	@(link_name="SetEvent")     set_event      :: proc(event: Handle) -> Bool ---;
+	@(link_name="ResetEvent")   reset_event    :: proc(event: Handle) -> Bool ---;
 
 	@(link_name="LoadLibraryA")   load_library_a   :: proc(c_str: cstring)  -> Hmodule ---;
 	@(link_name="LoadLibraryW")   load_library_w   :: proc(c_str: Wstring) -> Hmodule ---;

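As an aside, a hedged sketch of the manual-reset usage these three bindings enable (the `create_event_a` and `close_handle` bindings already existed in this file):

ev := create_event_a(nil, true, false, nil); // manual_reset = true, initially unsignalled
set_event(ev);   // stays signalled: releases all current and future waiters
reset_event(ev); // back to unsignalled
pulse_event(ev); // signal-then-reset in one step: releases only threads already waiting
close_handle(ev);
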
+ 0 - 23
examples/demo/demo.odin

@@ -587,28 +587,6 @@ array_programming :: proc() {
 	}
 }
 
-
-using println in import "core:fmt"
-
-using_in :: proc() {
-	fmt.println("# using in");
-	using print in fmt;
-
-	println("Hellope1");
-	print("Hellope2\n");
-
-	Foo :: struct {
-		x, y: int,
-		b: bool,
-	}
-	f: Foo;
-	f.x, f.y = 123, 321;
-	println(f);
-	using x, y in f;
-	x, y = 456, 654;
-	println(f);
-}
-
 named_proc_return_parameters :: proc() {
 	fmt.println("# named proc return parameters");
 
@@ -745,7 +723,6 @@ main :: proc() {
 		parametric_polymorphism();
 		threading_example();
 		array_programming();
-		using_in();
 		named_proc_return_parameters();
 		enum_export();
 		explicit_procedure_overloading();

+ 3 - 0
src/check_expr.cpp

@@ -2648,6 +2648,9 @@ Entity *check_selector(CheckerContext *c, Operand *operand, AstNode *node, Type
 
 	if (entity == nullptr && selector->kind == AstNode_Ident) {
 		String field_name = selector->Ident.token.string;
+		if (is_type_dynamic_array(type_deref(operand->type))) {
+			init_mem_allocator(c->checker);
+		}
 		sel = lookup_field(operand->type, field_name, operand->mode == Addressing_Type);
 		entity = sel.entity;
 

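This is the checker half of the commit's headline feature: before a field lookup on a dynamic array, `mem.Allocator` must be initialised so the `allocator` pseudo-field has a type. What it enables, sketched by the editor (assuming `make` records `context.allocator`, as the runtime does):

xs := make([dynamic]int, 0, 16);
a := xs.allocator;          // newly allowed: read the backing mem.Allocator
assert(a.procedure != nil);
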
+ 2 - 2
src/checker.cpp

@@ -1588,7 +1588,7 @@ void init_core_type_info(Checker *c) {
 	t_type_info_bit_field_ptr     = alloc_type_pointer(t_type_info_bit_field);
 }
 
-void init_core_allocator(Checker *c) {
+void init_mem_allocator(Checker *c) {
 	if (t_allocator != nullptr) {
 		return;
 	}
@@ -1633,7 +1633,7 @@ void init_core_map_type(Checker *c) {
 
 void init_preload(Checker *c) {
 	init_core_type_info(c);
-	init_core_allocator(c);
+	init_mem_allocator(c);
 	init_core_context(c);
 	init_core_source_code_location(c);
 	init_core_map_type(c);

+ 7 - 7
src/ir.cpp

@@ -2631,9 +2631,9 @@ irValue *ir_emit_struct_ep(irProcedure *proc, irValue *s, i32 index) {
 	} else if (is_type_dynamic_array(t)) {
 		switch (index) {
 		case 0: result_type = alloc_type_pointer(alloc_type_pointer(t->DynamicArray.elem)); break;
-		case 1: result_type = t_int_ptr;                                      break;
-		case 2: result_type = t_int_ptr;                                      break;
-		case 3: result_type = t_allocator_ptr;                                break;
+		case 1: result_type = t_int_ptr;       break;
+		case 2: result_type = t_int_ptr;       break;
+		case 3: result_type = t_allocator_ptr; break;
 		}
 	} /* else if (is_type_map(t)) {
 		init_map_internal_types(t);
@@ -2650,7 +2650,7 @@ irValue *ir_emit_struct_ep(irProcedure *proc, irValue *s, i32 index) {
 		GB_PANIC("TODO(bill): struct_gep type: %s, %d", type_to_string(ir_type(s)), index);
 	}
 
-	GB_ASSERT(result_type != nullptr);
+	GB_ASSERT_MSG(result_type != nullptr, "%s %d", type_to_string(t), index);
 
 	return ir_emit(proc, ir_instr_struct_element_ptr(proc, s, index, result_type));
 }
@@ -2708,9 +2708,9 @@ irValue *ir_emit_struct_ev(irProcedure *proc, irValue *s, i32 index) {
 	case Type_DynamicArray:
 		switch (index) {
 		case 0: result_type = alloc_type_pointer(t->DynamicArray.elem); break;
-		case 1: result_type = t_int;                                      break;
-		case 2: result_type = t_int;                                      break;
-		case 3: result_type = t_allocator;                                break;
+		case 1: result_type = t_int;                                    break;
+		case 2: result_type = t_int;                                    break;
+		case 3: result_type = t_allocator;                              break;
 		}
 		break;
 

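For orientation, the layout these GEP indices walk, written as the `Raw_Dynamic_Array` view the types.cpp comment below alludes to (a conceptual sketch, not code from this commit):

Raw_Dynamic_Array :: struct {
	data:      rawptr,    // index 0
	len:       int,       // index 1
	cap:       int,       // index 2
	allocator: Allocator, // index 3 — the field `.allocator` now selects
}
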
+ 59 - 49
src/types.cpp

@@ -1611,55 +1611,6 @@ Selection lookup_field_with_selection(Type *type_, String field_name, bool is_ty
 
 	type = base_type(type);
 
-	if (type->kind == Type_Basic) {
-		switch (type->Basic.kind) {
-		case Basic_any: {
-		#if 1
-			// IMPORTANT TODO(bill): Should these members be available to should I only allow them with
-			// `Raw_Any` type?
-			String data_str = str_lit("data");
-			String typeid_str = str_lit("typeid");
-			gb_local_persist Entity *entity__any_data = alloc_entity_field(nullptr, make_token_ident(data_str), t_rawptr, false, 0);
-			gb_local_persist Entity *entity__any_typeid = alloc_entity_field(nullptr, make_token_ident(typeid_str), t_typeid, false, 1);
-
-			if (field_name == data_str) {
-				selection_add_index(&sel, 0);
-				sel.entity = entity__any_data;;
-				return sel;
-			} else if (field_name == typeid_str) {
-				selection_add_index(&sel, 1);
-				sel.entity = entity__any_typeid;
-				return sel;
-			}
-		#endif
-		} break;
-		}
-
-		return sel;
-	} else if (type->kind == Type_Array) {
-		if (type->Array.count <= 4) {
-			// HACK(bill): Memory leak
-			switch (type->Array.count) {
-			#define _ARRAY_FIELD_CASE(_length, _name) \
-			case (_length): \
-				if (field_name == _name) { \
-					selection_add_index(&sel, (_length)-1); \
-					sel.entity = alloc_entity_array_elem(nullptr, make_token_ident(str_lit(_name)), type->Array.elem, (_length)-1); \
-					return sel; \
-				} \
-				/*fallthrough*/
-
-			_ARRAY_FIELD_CASE(4, "w");
-			_ARRAY_FIELD_CASE(3, "z");
-			_ARRAY_FIELD_CASE(2, "y");
-			_ARRAY_FIELD_CASE(1, "x");
-			default: break;
-
-			#undef _ARRAY_FIELD_CASE
-			}
-		}
-	}
-
 	if (is_type) {
 		switch (type->kind) {
 		case Type_Struct:
@@ -1769,6 +1720,65 @@ Selection lookup_field_with_selection(Type *type_, String field_name, bool is_ty
 				return sel;
 			}
 		}
+	} else if (type->kind == Type_Basic) {
+		switch (type->Basic.kind) {
+		case Basic_any: {
+		#if 1
+			// IMPORTANT TODO(bill): Should these members be available, or should I only allow them with
+			// the `Raw_Any` type?
+			String data_str = str_lit("data");
+			String typeid_str = str_lit("typeid");
+			gb_local_persist Entity *entity__any_data = alloc_entity_field(nullptr, make_token_ident(data_str), t_rawptr, false, 0);
+			gb_local_persist Entity *entity__any_typeid = alloc_entity_field(nullptr, make_token_ident(typeid_str), t_typeid, false, 1);
+
+			if (field_name == data_str) {
+				selection_add_index(&sel, 0);
+				sel.entity = entity__any_data;
+				return sel;
+			} else if (field_name == typeid_str) {
+				selection_add_index(&sel, 1);
+				sel.entity = entity__any_typeid;
+				return sel;
+			}
+		#endif
+		} break;
+		}
+
+		return sel;
+	} else if (type->kind == Type_Array) {
+		if (type->Array.count <= 4) {
+			// HACK(bill): Memory leak
+			switch (type->Array.count) {
+			#define _ARRAY_FIELD_CASE(_length, _name) \
+			case (_length): \
+				if (field_name == _name) { \
+					selection_add_index(&sel, (_length)-1); \
+					sel.entity = alloc_entity_array_elem(nullptr, make_token_ident(str_lit(_name)), type->Array.elem, (_length)-1); \
+					return sel; \
+				} \
+				/*fallthrough*/
+
+			_ARRAY_FIELD_CASE(4, "w");
+			_ARRAY_FIELD_CASE(3, "z");
+			_ARRAY_FIELD_CASE(2, "y");
+			_ARRAY_FIELD_CASE(1, "x");
+			default: break;
+
+			#undef _ARRAY_FIELD_CASE
+			}
+		}
+	} else if (type->kind == Type_DynamicArray) {
+		// IMPORTANT TODO(bill): Should these members be available, or should I only allow them with
+		// the `Raw_Dynamic_Array` type?
+		GB_ASSERT(t_allocator != nullptr);
+		String allocator_str = str_lit("allocator");
+		gb_local_persist Entity *entity__allocator = alloc_entity_field(nullptr, make_token_ident(allocator_str), t_allocator, false, 0);
+
+		if (field_name == allocator_str) {
+			selection_add_index(&sel, 3);
+			sel.entity = entity__allocator;
+			return sel;
+		}
 	}
 
 	return sel;
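
Taken together, the moved branches and the new `Type_DynamicArray` case make all three selector families resolve. An editor's sketch of each (hedged; `fmt` is only for illustration):

import "core:fmt"

selectors :: proc() {
	xs := make([dynamic]f32, 0, 8);
	fmt.println(xs.allocator);       // new Type_DynamicArray branch, selection index 3

	v: [4]f32;
	v.x, v.y, v.z, v.w = 1, 2, 3, 4; // Type_Array swizzle branch (moved, unchanged)

	a: any = v;
	fmt.println(a.data);             // Basic_any branch (moved; stray `;;` fixed)
}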