
Begin clarifying allocation patterns by changing from `heap_allocator` to specific arenas

gingerBill authored 4 years ago
commit 3c1c10a178
10 changed files with 192 additions and 172 deletions
  1. src/checker.cpp  +3 -1
  2. src/checker.hpp  +0 -2
  3. src/common.cpp  +40 -8
  4. src/entity.cpp  +1 -1
  5. src/llvm_abi.cpp  +6 -1
  6. src/llvm_backend.cpp  +97 -136
  7. src/main.cpp  +15 -1
  8. src/parser.cpp  +25 -18
  9. src/parser.hpp  +1 -0
  10. src/types.cpp  +4 -4

+ 3 - 1
src/checker.cpp

@@ -690,8 +690,10 @@ void add_global_type_entity(String name, Type *type) {
 
 void init_universal(void) {
 	BuildContext *bc = &build_context;
+
 	// NOTE(bill): No need to free these
-	gbAllocator a = heap_allocator();
+	// gbAllocator a = heap_allocator();
+	gbAllocator a = permanent_allocator();
 
 	builtin_pkg = gb_alloc_item(a, AstPackage);
 	builtin_pkg->name = str_lit("builtin");

+ 0 - 2
src/checker.hpp

@@ -337,8 +337,6 @@ struct Checker {
 
 
 
-
-
 gb_global AstPackage *builtin_pkg    = nullptr;
 gb_global AstPackage *intrinsics_pkg = nullptr;
 gb_global AstPackage *config_pkg      = nullptr;

+ 40 - 8
src/common.cpp

@@ -373,8 +373,8 @@ typedef struct Arena {
 	gbAllocator backing;
 	isize       block_size;
 	gbMutex     mutex;
-
 	isize total_used;
+	bool   use_mutex;
 } Arena;
 
 #define ARENA_MIN_ALIGNMENT 16
@@ -388,8 +388,9 @@ void arena_init(Arena *arena, gbAllocator backing, isize block_size=ARENA_DEFAUL
 }
 
 void arena_grow(Arena *arena, isize min_size) {
-	// gb_mutex_lock(&arena->mutex);
-	// defer (gb_mutex_unlock(&arena->mutex));
+	if (arena->use_mutex) {
+		gb_mutex_lock(&arena->mutex);
+	}
 
 	isize size = gb_max(arena->block_size, min_size);
 	size = ALIGN_UP(size, ARENA_MIN_ALIGNMENT);
@@ -399,11 +400,16 @@ void arena_grow(Arena *arena, isize min_size) {
 	GB_ASSERT(arena->ptr == ALIGN_DOWN_PTR(arena->ptr, ARENA_MIN_ALIGNMENT));
 	arena->end = arena->ptr + size;
 	array_add(&arena->blocks, arena->ptr);
+
+	if (arena->use_mutex) {
+		gb_mutex_unlock(&arena->mutex);
+	}
 }
 
 void *arena_alloc(Arena *arena, isize size, isize alignment) {
-	// gb_mutex_lock(&arena->mutex);
-	// defer (gb_mutex_unlock(&arena->mutex));
+	if (arena->use_mutex) {
+		gb_mutex_lock(&arena->mutex);
+	}
 
 	arena->total_used += size;
 
@@ -419,12 +425,17 @@ void *arena_alloc(Arena *arena, isize size, isize alignment) {
 	GB_ASSERT(arena->ptr <= arena->end);
 	GB_ASSERT(ptr == ALIGN_DOWN_PTR(ptr, align));
 	// zero_size(ptr, size);
+
+	if (arena->use_mutex) {
+		gb_mutex_unlock(&arena->mutex);
+	}
 	return ptr;
 }
 
 void arena_free_all(Arena *arena) {
-	// gb_mutex_lock(&arena->mutex);
-	// defer (gb_mutex_unlock(&arena->mutex));
+	if (arena->use_mutex) {
+		gb_mutex_lock(&arena->mutex);
+	}
 
 	for_array(i, arena->blocks) {
 		gb_free(arena->backing, arena->blocks[i]);
@@ -432,6 +443,10 @@ void arena_free_all(Arena *arena) {
 	array_clear(&arena->blocks);
 	arena->ptr = nullptr;
 	arena->end = nullptr;
+
+	if (arena->use_mutex) {
+		gb_mutex_unlock(&arena->mutex);
+	}
 }
 
 
@@ -460,7 +475,14 @@ GB_ALLOCATOR_PROC(arena_allocator_proc) {
 		// GB_PANIC("gbAllocation_Free not supported");
 		break;
 	case gbAllocation_Resize:
-		GB_PANIC("gbAllocation_Resize: not supported");
+		if (size == 0) {
+			ptr = nullptr;
+		} else if (size <= old_size) {
+			ptr = old_memory;
+		} else {
+			ptr = arena_alloc(arena, size, alignment);
+			gb_memmove(ptr, old_memory, old_size);
+		}
 		break;
 	case gbAllocation_FreeAll:
 		arena_free_all(arena);
@@ -472,6 +494,16 @@ GB_ALLOCATOR_PROC(arena_allocator_proc) {
 
 
 
+gb_global Arena permanent_arena = {};
+gb_global Arena temporary_arena = {};
+
+gbAllocator permanent_allocator() {
+	return arena_allocator(&permanent_arena);
+}
+gbAllocator temporary_allocator() {
+	return arena_allocator(&temporary_arena);
+}
+
 
 
 

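The common.cpp changes add three things: an optional `use_mutex` flag so a shared arena can take its lock around grow/alloc/free-all, a working `gbAllocation_Resize` path (shrink in place, otherwise allocate fresh and copy with `gb_memmove`), and two global arenas exposed through `permanent_allocator()` and `temporary_allocator()`. For readers who want the pattern in isolation, here is a minimal standalone sketch using only the C++ standard library; the names mirror the commit, but the gb allocator plumbing is left out, so treat it as an illustration rather than the real implementation.

```cpp
// Minimal standalone sketch of the growing-block arena pattern above.
// Standard library only; the compiler's version sits behind the gb allocator interface.
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <mutex>
#include <vector>

struct Arena {
	std::vector<void *> blocks;     // every block ever grabbed; released in free_all
	uint8_t  *ptr  = nullptr;       // bump pointer into the current block
	uint8_t  *end  = nullptr;       // one past the end of the current block
	size_t    block_size = 1 << 20; // illustrative default block size
	bool      use_mutex  = false;   // lock only when the arena is shared across threads
	std::mutex mutex;
};

static uintptr_t align_up(uintptr_t n, uintptr_t a) { return (n + a - 1) & ~(a - 1); }

void arena_grow(Arena *arena, size_t min_size) {
	size_t size = std::max(arena->block_size, min_size);
	arena->ptr = static_cast<uint8_t *>(std::malloc(size));
	arena->end = arena->ptr + size;
	arena->blocks.push_back(arena->ptr);
}

void *arena_alloc(Arena *arena, size_t size, size_t alignment) {
	std::unique_lock<std::mutex> lock(arena->mutex, std::defer_lock);
	if (arena->use_mutex) lock.lock();

	uintptr_t aligned = align_up(reinterpret_cast<uintptr_t>(arena->ptr), alignment);
	if (arena->ptr == nullptr ||
	    aligned + size > reinterpret_cast<uintptr_t>(arena->end)) {
		arena_grow(arena, size + alignment);    // current block exhausted: grab a new one
		aligned = align_up(reinterpret_cast<uintptr_t>(arena->ptr), alignment);
	}
	arena->ptr = reinterpret_cast<uint8_t *>(aligned + size);
	return reinterpret_cast<void *>(aligned);
}

// In the spirit of the new gbAllocation_Resize case: shrink in place,
// otherwise allocate fresh and copy; the old memory is never reclaimed early.
void *arena_resize(Arena *arena, void *old_memory, size_t old_size, size_t size, size_t alignment) {
	if (size == 0)        return nullptr;
	if (size <= old_size) return old_memory;
	void *p = arena_alloc(arena, size, alignment);
	std::memmove(p, old_memory, old_size);
	return p;
}

void arena_free_all(Arena *arena) {
	std::unique_lock<std::mutex> lock(arena->mutex, std::defer_lock);
	if (arena->use_mutex) lock.lock();
	for (void *block : arena->blocks) std::free(block);
	arena->blocks.clear();
	arena->ptr = arena->end = nullptr;
}

// Two long-lived arenas, mirroring permanent_arena and temporary_arena above:
// the permanent one is never reset, the temporary one is reset between phases.
Arena permanent_arena;
Arena temporary_arena;
```

Because `arena_allocator()` wraps an `Arena` in an ordinary `gbAllocator`, call sites such as `init_universal` and `alloc_entity` only need to swap `heap_allocator()` for `permanent_allocator()`; everything downstream stays unchanged.
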
+ 1 - 1
src/entity.cpp

@@ -220,7 +220,7 @@ bool entity_has_deferred_procedure(Entity *e) {
 gb_global u64 global_entity_id = 0;
 
 Entity *alloc_entity(EntityKind kind, Scope *scope, Token token, Type *type) {
-	gbAllocator a = heap_allocator();
+	gbAllocator a = permanent_allocator();
 	Entity *entity = gb_alloc_item(a, Entity);
 	entity->kind   = kind;
 	entity->state  = EntityState_Unresolved;

+ 6 - 1
src/llvm_abi.cpp

@@ -276,6 +276,9 @@ Type *alloc_type_struct_from_field_types(Type **field_types, isize field_count,
 }
 
 Type *alloc_type_tuple_from_field_types(Type **field_types, isize field_count, bool is_packed, bool must_be_tuple) {
+	if (field_count == 0) {
+		return nullptr;
+	}
 	if (!must_be_tuple && field_count == 1) {
 		return field_types[0];
 	}
@@ -297,7 +300,9 @@ Type *alloc_type_proc_from_types(Type **param_types, unsigned param_count, Type
 	Type *params  = alloc_type_tuple_from_field_types(param_types, param_count, false, true);
 	isize results_count = 0;
 	if (results != nullptr) {
-		GB_ASSERT(results->kind == Type_Tuple);
+		if (results->kind != Type_Tuple) {
+			results = alloc_type_tuple_from_field_types(&results, 1, false, true);
+		}
 		results_count = results->Tuple.variables.count;
 	}
 

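The llvm_abi.cpp change is small: an empty field list now yields a null tuple instead of allocating one, and a lone non-tuple result type is wrapped in a single-element tuple rather than tripping `GB_ASSERT`. A hypothetical illustration with placeholder types (not the compiler's `Type` machinery):

```cpp
// Placeholder illustration of the relaxed result handling above; Type here is
// a stand-in, not the compiler's Type struct.
#include <cstddef>
#include <vector>

struct Type {
	bool is_tuple = false;
	std::vector<Type *> elements;   // populated only for tuples
};

Type *make_tuple(Type **types, size_t count) {
	if (count == 0) return nullptr;            // mirrors the new field_count == 0 early-out
	Type *t = new Type;
	t->is_tuple = true;
	t->elements.assign(types, types + count);
	return t;
}

size_t count_results(Type *results) {
	if (results == nullptr) return 0;
	if (!results->is_tuple) {
		// Wrap the single result instead of asserting that it is already a tuple.
		results = make_tuple(&results, 1);
	}
	return results->elements.size();
}
```
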
+ 97 - 136
src/llvm_backend.cpp
File diff suppressed because it is too large


+ 15 - 1
src/main.cpp

@@ -1643,12 +1643,15 @@ int main(int arg_count, char const **arg_ptr) {
 	timings_init(timings, str_lit("Total Time"), 128);
 	defer (timings_destroy(timings));
 
+	arena_init(&permanent_arena, heap_allocator());
+	arena_init(&temporary_arena, heap_allocator());
+	arena_init(&global_ast_arena, heap_allocator());
+
 	init_string_buffer_memory();
 	init_string_interner();
 	init_global_error_collector();
 	init_keyword_hash_table();
 	global_big_int_init();
-	arena_init(&global_ast_arena, heap_allocator());
 
 	array_init(&library_collections, heap_allocator());
 	// NOTE(bill): 'core' cannot be (re)defined by the user
@@ -1795,6 +1798,8 @@
 		return 1;
 	}
 
+	arena_free_all(&temporary_arena);
+
 	if (build_context.generate_docs) {
 		// generate_documentation(&parser);
 		return 0;
@@ -1812,6 +1817,7 @@
 		check_parsed_files(&checker);
 	}
 
+	arena_free_all(&temporary_arena);
 
 	if (build_context.no_output_files) {
 		if (build_context.query_data_set_settings.ok) {
@@ -1842,6 +1848,8 @@
 		}
 		lb_generate_code(&gen);
 
+		arena_free_all(&temporary_arena);
+
 		switch (build_context.build_mode) {
 		case BuildMode_Executable:
 		case BuildMode_DynamicLibrary:
@@ -1919,12 +1927,18 @@
 		timings_start_section(timings, str_lit("llvm ir gen"));
 		ir_gen_tree(&ir_gen);
 
+		arena_free_all(&temporary_arena);
+
 		timings_start_section(timings, str_lit("llvm ir opt tree"));
 		ir_opt_tree(&ir_gen);
 
+		arena_free_all(&temporary_arena);
+
 		timings_start_section(timings, str_lit("llvm ir print"));
 		print_llvm_ir(&ir_gen);
 
+		arena_free_all(&temporary_arena);
+
 
 		String output_name = ir_gen.output_name;
 		String output_base = ir_gen.output_base;

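Taken together, the main.cpp changes are one pattern: initialize the arenas once at startup, then call `arena_free_all(&temporary_arena)` at each phase boundary (after parsing, after checking, after each code-generation step) so every scratch allocation from that phase is released in a single call. A toy driver in the same spirit, reusing the standalone `Arena` sketch above and with placeholder phase functions (not the compiler's), might look like:

```cpp
// Toy driver showing the phase-boundary reset pattern from main.cpp, built on
// the standalone Arena sketch above. parse_files / check_files / generate_code
// are placeholders, not compiler APIs.
void parse_files(Arena *scratch)   { arena_alloc(scratch, 4096, 16); /* ...parse... */ }
void check_files(Arena *scratch)   { arena_alloc(scratch, 4096, 16); /* ...check... */ }
void generate_code(Arena *scratch) { arena_alloc(scratch, 4096, 16); /* ...codegen... */ }

int main() {
	parse_files(&temporary_arena);
	arena_free_all(&temporary_arena);   // all parsing scratch memory released in one call

	check_files(&temporary_arena);
	arena_free_all(&temporary_arena);

	generate_code(&temporary_arena);
	arena_free_all(&temporary_arena);

	// permanent_arena is deliberately never reset: types, entities, and AST
	// packages allocated from it stay valid for the whole compilation.
	return 0;
}
```

Nothing handed out by the temporary arena is freed individually, so a phase cannot leak scratch memory past its boundary; the permanent arena, by contrast, is never reset and simply lives for the duration of the process, which is why the commit's NOTE says those allocations never need to be freed.
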
+ 25 - 18
src/parser.cpp

@@ -108,6 +108,30 @@ Token ast_token(Ast *node) {
 	return empty_token;
 }
 
+
+gb_global gbAtomic64 total_allocated_node_memory = {0};
+gb_global gbAtomic64 total_subtype_node_memory_test = {0};
+
+isize ast_node_size(AstKind kind) {
+	return align_formula_isize(gb_size_of(AstCommonStuff) + ast_variant_sizes[kind], gb_align_of(void *));
+
+}
+// NOTE(bill): And this below is why is I/we need a new language! Discriminated unions are a pain in C/C++
+Ast *alloc_ast_node(AstFile *f, AstKind kind) {
+	gbAllocator a = ast_allocator(f);
+
+	isize size = ast_node_size(kind);
+
+	gb_atomic64_fetch_add(&total_allocated_node_memory, cast(i64)(gb_size_of(Ast)));
+	gb_atomic64_fetch_add(&total_subtype_node_memory_test, cast(i64)(gb_size_of(AstCommonStuff) + ast_variant_sizes[kind]));
+
+	// Ast *node = gb_alloc_item(a, Ast);
+	Ast *node = cast(Ast *)gb_alloc(a, size);
+	node->kind = kind;
+	node->file = f;
+	return node;
+}
+
 Ast *clone_ast(Ast *node);
 Array<Ast *> clone_ast_array(Array<Ast *> array) {
 	Array<Ast *> result = {};
@@ -125,7 +149,7 @@ Ast *clone_ast(Ast *node) {
 		return nullptr;
 	}
 	Ast *n = alloc_ast_node(node->file, node->kind);
-	gb_memmove(n, node, gb_size_of(Ast));
+	gb_memmove(n, node, ast_node_size(node->kind));
 
 	switch (n->kind) {
 	default: GB_PANIC("Unhandled Ast %.*s", LIT(ast_strings[n->kind])); break;
@@ -463,23 +487,6 @@ bool ast_node_expect(Ast *node, AstKind kind) {
 	return true;
 }
 
-
-gb_global gbAtomic64 total_allocated_node_memory = {0};
-gb_global gbAtomic64 total_subtype_node_memory_test = {0};
-
-// NOTE(bill): And this below is why is I/we need a new language! Discriminated unions are a pain in C/C++
-Ast *alloc_ast_node(AstFile *f, AstKind kind) {
-	gbAllocator a = ast_allocator(f);
-
-	gb_atomic64_fetch_add(&total_allocated_node_memory, cast(i64)(gb_size_of(Ast)));
-	gb_atomic64_fetch_add(&total_subtype_node_memory_test, cast(i64)(gb_size_of(AstCommonStuff) + ast_variant_sizes[kind]));
-
-	Ast *node = gb_alloc_item(a, Ast);
-	node->kind = kind;
-	node->file = f;
-	return node;
-}
-
 Ast *ast_bad_expr(AstFile *f, Token begin, Token end) {
 	Ast *result = alloc_ast_node(f, Ast_BadExpr);
 	result->BadExpr.begin = begin;

+ 1 - 0
src/parser.hpp

@@ -655,6 +655,7 @@ struct Ast {
 	Scope *      scope;
 	TypeAndValue tav;
 
+	// IMPORTANT NOTE(bill): This must be at the end since the AST is allocated to be size of the variant
 	union {
#define AST_KIND(_kind_name_, name, ...) GB_JOIN2(Ast, _kind_name_) _kind_name_;
 	AST_KINDS

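parser.cpp and parser.hpp together introduce variant-sized AST nodes: `ast_node_size` computes the size of the common fields plus the size of one kind's variant, `alloc_ast_node` allocates exactly that much, `clone_ast` copies only that many bytes, and the new comment in parser.hpp records the invariant that makes this safe, namely that the variant union must be the last member of `Ast`. A small self-contained sketch of the idea, with made-up node kinds rather than the compiler's:

```cpp
// Standalone sketch of variant-sized node allocation (the ast_node_size /
// alloc_ast_node change above). Node kinds and fields are illustrative.
#include <cstddef>
#include <cstdlib>
#include <cstring>

struct BinaryExpr { void *left; void *right; int op; };
struct Ident      { const char *name; };

enum NodeKind { Kind_Binary, Kind_Ident, Kind_Count };

struct Node {
	NodeKind kind;
	void    *file;
	// The union MUST be the last member: each node is allocated with only
	// enough room for its own variant, so anything placed after the union
	// would be sliced off for small variants (the parser.hpp note above).
	union Variant {
		BinaryExpr binary;
		Ident      ident;
	} variant;
};

static const size_t variant_sizes[Kind_Count] = {
	sizeof(BinaryExpr),   // Kind_Binary
	sizeof(Ident),        // Kind_Ident
};

size_t node_size(NodeKind kind) {
	// Common fields plus this kind's variant, rounded up to pointer alignment
	// so the next bump allocation stays aligned.
	size_t size  = offsetof(Node, variant) + variant_sizes[kind];
	size_t align = alignof(void *);
	return (size + align - 1) & ~(align - 1);
}

Node *alloc_node(NodeKind kind) {
	// Plain calloc stands in for the arena-backed ast_allocator.
	Node *n = static_cast<Node *>(std::calloc(1, node_size(kind)));
	n->kind = kind;
	return n;
}

Node *clone_node(const Node *src) {
	Node *n = alloc_node(src->kind);
	// Copy only node_size(kind) bytes, never sizeof(Node): the source node may
	// not own the full union either (the same reason clone_ast now passes
	// ast_node_size(node->kind) to gb_memmove instead of gb_size_of(Ast)).
	std::memcpy(n, src, node_size(src->kind));
	return n;
}
```

The two `gbAtomic64` counters added in parser.cpp record, respectively, what full-size `Ast` allocations would have cost and what the variant-sized allocations actually cost, which makes the memory saving straightforward to compare.
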
+ 4 - 4
src/types.cpp

@@ -771,7 +771,8 @@ void set_base_type(Type *t, Type *base) {
 
 
 Type *alloc_type(TypeKind kind) {
-	gbAllocator a = heap_allocator();
+	// gbAllocator a = heap_allocator();
+	gbAllocator a = permanent_allocator();
 	Type *t = gb_alloc_item(a, Type);
 	zero_item(t);
 	t->kind = kind;
@@ -2340,7 +2341,7 @@ Selection lookup_field_from_index(Type *type, i64 index) {
 	GB_ASSERT(is_type_struct(type) || is_type_union(type) || is_type_tuple(type));
 	type = base_type(type);
 
-	gbAllocator a = heap_allocator();
+	gbAllocator a = permanent_allocator();
 	isize max_count = 0;
 	switch (type->kind) {
 	case Type_Struct:   max_count = type->Struct.fields.count;   break;
@@ -2397,7 +2398,6 @@ Selection lookup_field_with_selection(Type *type_, String field_name, bool is_ty
 		return empty_selection;
 	}
 
-	gbAllocator a = heap_allocator();
 	Type *type = type_deref(type_);
 	bool is_ptr = type != type_;
 	sel.indirect = sel.indirect || is_ptr;
@@ -2986,7 +2986,7 @@ i64 type_align_of_internal(Type *t, TypePath *path) {
 }
 
 Array<i64> type_set_offsets_of(Array<Entity *> const &fields, bool is_packed, bool is_raw_union) {
-	gbAllocator a = heap_allocator();
+	gbAllocator a = permanent_allocator();
 	auto offsets = array_make<i64>(a, fields.count);
 	i64 curr_offset = 0;
 	if (is_raw_union) {

Some files were not shown because too many files changed in this diff