Browse Source

Remove need for `BlockingMutex` in `Arena`

gingerBill 1 year ago
parent
commit
a45e05bb18
3 changed files with 5 additions and 16 deletions
  1. +3 −12  src/common_memory.cpp
  2. +1 −1   src/parser.cpp
  3. +1 −3   src/parser.hpp

+ 3 - 12
src/common_memory.cpp

@@ -45,7 +45,7 @@ struct MemoryBlock {
 struct Arena {
 	MemoryBlock * curr_block;
 	isize         minimum_block_size;
-	BlockingMutex mutex;
+	// BlockingMutex mutex;
 	isize         temp_count;
 	Thread *      parent_thread;
 };
@@ -82,12 +82,7 @@ gb_internal void thread_init_arenas(Thread *t) {
 
 gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
 	GB_ASSERT(gb_is_power_of_two(alignment));
-	
-	if (arena->parent_thread == nullptr) {
-		mutex_lock(&arena->mutex);
-	} else {
-		GB_ASSERT(arena->parent_thread == get_current_thread());
-	}
+	GB_ASSERT(arena->parent_thread == get_current_thread());
 
 	isize size = 0;
 	if (arena->curr_block != nullptr) {
@@ -113,11 +108,7 @@ gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
 	
 	curr_block->used += size;
 	GB_ASSERT(curr_block->used <= curr_block->size);
-	
-	if (arena->parent_thread == nullptr) {
-		mutex_unlock(&arena->mutex);
-	}
-	
+
 	// NOTE(bill): memory will be zeroed by default due to virtual memory 
 	return ptr;	
 }

+ 1 - 1
src/parser.cpp

@@ -118,7 +118,7 @@ gb_internal isize ast_node_size(AstKind kind) {
 gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind) {
 	isize size = ast_node_size(kind);
 
-	Ast *node = cast(Ast *)arena_alloc(&global_thread_local_ast_arena, size, 16);
+	Ast *node = cast(Ast *)arena_alloc(get_arena(ThreadArena_Permanent), size, 16);
 	node->kind = kind;
 	node->file_id = f ? f->id : 0;
 
 

+ 1 - 3
src/parser.hpp

@@ -878,10 +878,8 @@ gb_internal gb_inline bool is_ast_when_stmt(Ast *node) {
 	return node->kind == Ast_WhenStmt;
 }
 
-gb_global gb_thread_local Arena global_thread_local_ast_arena = {};
-
 gb_internal gb_inline gbAllocator ast_allocator(AstFile *f) {
-	return arena_allocator(&global_thread_local_ast_arena);
+	return permanent_allocator();
 }
 
 gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind);