Minor code cleanup for backend; add `struct_fields_index_by_increasing_offset` for future use

gingerBill committed 3 years ago · commit 00671a59a0

+ 30 - 93
src/array.cpp

@@ -1,7 +1,6 @@
 #define ARRAY_GROW_FORMULA(x) (gb_max(((x)+1)*3 >> 1, 8))
 GB_STATIC_ASSERT(ARRAY_GROW_FORMULA(0) > 0);
 
-#if 1
 template <typename T>
 struct Array {
 	gbAllocator allocator;
@@ -418,98 +417,36 @@ void array_unordered_remove(Array<T> *array, isize index) {
 
 
 
-
-#endif
-
-#if 0
-#define Array(Type_) struct { \
-	gbAllocator const &allocator; \
-	Type_ *     e; \
-	isize       count; \
-	isize       capacity; \
+template <typename T>
+T *begin(Array<T> &array) {
+	return array.data;
 }
-
-typedef Array(void) ArrayVoid;
-
-#define array_init_reserve(x_, allocator_, init_capacity_) do { \
-	void **e = cast(void **)&((x_)->e); \
-	GB_ASSERT((x_) != nullptr); \
-	(x_)->allocator = (allocator_); \
-	(x_)->count = 0; \
-	(x_)->capacity = (init_capacity_); \
-	*e = gb_alloc((allocator_), gb_size_of(*(x_)->e)*(init_capacity_)); \
-} while (0)
-
-#define array_init_count(x_, allocator_, init_count_) do { \
-	void **e = cast(void **)&((x_)->e); \
-	GB_ASSERT((x_) != nullptr); \
-	(x_)->allocator = (allocator_); \
-	(x_)->count = (init_count_); \
-	(x_)->capacity = (init_count_); \
-	*e = gb_alloc((allocator_), gb_size_of(*(x_)->e)*(init_count_)); \
-} while (0)
-
-#define array_init(x_, allocator_)        do { array_init_reserve(x_, allocator_, ARRAY_GROW_FORMULA(0)); } while (0)
-#define array_free(x_)                    do { gb_free((x_)->allocator, (x_)->e); } while (0)
-#define array_set_capacity(x_, capacity_) do { array__set_capacity((x_), (capacity_), gb_size_of(*(x_)->e)); } while (0)
-
-#define array_grow(x_, min_capacity_) do { \
-	isize new_capacity = ARRAY_GROW_FORMULA((x_)->capacity); \
-	if (new_capacity < (min_capacity_)) { \
-		new_capacity = (min_capacity_); \
-	} \
-	array_set_capacity(x_, new_capacity); \
-} while (0)
-
-#define array_add(x_, item_) do { \
-	if ((x_)->capacity < (x_)->count+1) { \
-		array_grow(x_, 0); \
-	} \
-	(x_)->e[(x_)->count++] = item_; \
-} while (0)
-
-#define array_pop(x_)   do { GB_ASSERT((x_)->count > 0); (x_)->count--; } while (0)
-#define array_clear(x_) do { (x_)->count = 0; } while (0)
-
-#define array_resize(x_, new_count_) do { \
-	if ((x_)->capacity < (new_count_)) { \
-		array_grow((x_), (new_count_)); \
-	} \
-	(x_)->count = (new_count_); \
-} while (0)
-
-#define array_reserve(x_, new_capacity_) do { \
-	if ((x_)->capacity < (new_capacity_)) { \
-		array_set_capacity((x_), (new_capacity_)); \
-	} \
-} while (0)
-
-
-
-
-void array__set_capacity(void *ptr, isize capacity, isize element_size) {
-	ArrayVoid *x = cast(ArrayVoid *)ptr;
-	GB_ASSERT(ptr != nullptr);
-
-	GB_ASSERT(element_size > 0);
-
-	if (capacity == x->capacity) {
-		return;
-	}
-
-	if (capacity < x->count) {
-		if (x->capacity < capacity) {
-			isize new_capacity = ARRAY_GROW_FORMULA(x->capacity);
-			if (new_capacity < capacity) {
-				new_capacity = capacity;
-			}
-			array__set_capacity(ptr, new_capacity, element_size);
-		}
-		x->count = capacity;
-	}
-
-	x->e = gb_resize(x->allocator, x->e, element_size*x->capacity, element_size*capacity);
-	x->capacity = capacity;
+template <typename T>
+T const *begin(Array<T> const &array) {
+	return array.data;
+}
+template <typename T>
+T *end(Array<T> &array) {
+	return array.data + array.count;
+}
+template <typename T>
+T const *end(Array<T> const &array) {
+	return array.data + array.count;
 }
-#endif
 
+template <typename T>
+T *begin(Slice<T> &array) {
+	return array.data;
+}
+template <typename T>
+T const *begin(Slice<T> const &array) {
+	return array.data;
+}
+template <typename T>
+T *end(Slice<T> &array) {
+	return array.data + array.count;
+}
+template <typename T>
+T const *end(Slice<T> const &array) {
+	return array.data + array.count;
+}
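
The `begin`/`end` overloads added above are what let range-based `for` iterate `Array<T>` and `Slice<T>` directly; the check_expr.cpp and check_type.cpp hunks below rely on exactly that. A minimal usage sketch, assuming the template `array_*` helpers and `heap_allocator()` from this codebase (the values are hypothetical):

	Array<int> values = {};
	array_init(&values, heap_allocator());
	array_add(&values, 1);
	array_add(&values, 2);
	// The range-for desugars to begin(values)/end(values), i.e. a raw
	// pointer walk over values.data .. values.data + values.count.
	for (int v : values) {
		gb_printf("%d\n", v); // prints 1, then 2
	}
	array_free(&values);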

+ 2 - 2
src/check_expr.cpp

@@ -2017,8 +2017,8 @@ void add_comparison_procedures_for_fields(CheckerContext *c, Type *t) {
 		}
 		break;
 	case Type_Struct:
-		for_array(i, t->Struct.fields) {
-			add_comparison_procedures_for_fields(c, t->Struct.fields[i]->type);
+		for (Entity *field : t->Struct.fields) {
+			add_comparison_procedures_for_fields(c, field->type);
 		}
 		break;
 	}

+ 1 - 2
src/check_type.cpp

@@ -39,8 +39,7 @@ void populate_using_entity_scope(CheckerContext *ctx, Ast *node, AstField *field
 	}
 
 	if (t->kind == Type_Struct) {
-		for_array(i, t->Struct.fields) {
-			Entity *f = t->Struct.fields[i];
+		for (Entity *f : t->Struct.fields) {
 			GB_ASSERT(f->kind == Entity_Variable);
 			String name = f->token.string;
 			Entity *e = scope_lookup_current(ctx->scope, name);

+ 1 - 1
src/llvm_backend.hpp

@@ -436,7 +436,7 @@ lbValue lb_emit_logical_binary_expr(lbProcedure *p, TokenKind op, Ast *left, Ast
 lbValue lb_build_cond(lbProcedure *p, Ast *cond, lbBlock *true_block, lbBlock *false_block);
 
 LLVMValueRef llvm_const_named_struct(lbModule *m, Type *t, LLVMValueRef *values, isize value_count_);
-LLVMValueRef llvm_const_named_struct(LLVMTypeRef t, LLVMValueRef *values, isize value_count_);
+LLVMValueRef llvm_const_named_struct_internal(LLVMTypeRef t, LLVMValueRef *values, isize value_count_);
 void lb_set_entity_from_other_modules_linkage_correctly(lbModule *other_module, Entity *e, String const &name);
 
 lbValue lb_expr_untyped_const_to_typed(lbModule *m, Ast *expr, Type *t);

+ 5 - 5
src/llvm_backend_const.cpp

@@ -132,7 +132,7 @@ LLVMValueRef llvm_const_named_struct(lbModule *m, Type *t, LLVMValueRef *values,
 	unsigned value_count = cast(unsigned)value_count_;
 	unsigned elem_count = LLVMCountStructElementTypes(struct_type);
 	if (elem_count == value_count) {
-		return llvm_const_named_struct(struct_type, values, value_count_);
+		return llvm_const_named_struct_internal(struct_type, values, value_count_);
 	}
 	Type *bt = base_type(t);
 	GB_ASSERT(bt->kind == Type_Struct);
@@ -152,10 +152,10 @@ LLVMValueRef llvm_const_named_struct(lbModule *m, Type *t, LLVMValueRef *values,
 		}
 	}
 	
-	return llvm_const_named_struct(struct_type, values_with_padding, values_with_padding_count);
+	return llvm_const_named_struct_internal(struct_type, values_with_padding, values_with_padding_count);
 }
 
-LLVMValueRef llvm_const_named_struct(LLVMTypeRef t, LLVMValueRef *values, isize value_count_) {
+LLVMValueRef llvm_const_named_struct_internal(LLVMTypeRef t, LLVMValueRef *values, isize value_count_) {
 	unsigned value_count = cast(unsigned)value_count_;
 	unsigned elem_count = LLVMCountStructElementTypes(t);
 	GB_ASSERT_MSG(value_count == elem_count, "%s %u %u", LLVMPrintTypeToString(t), value_count, elem_count);
@@ -895,7 +895,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
 			}
 
 			if (is_constant) {
-				res.value = llvm_const_named_struct(struct_type, values, cast(unsigned)value_count);
+				res.value = llvm_const_named_struct_internal(struct_type, values, cast(unsigned)value_count);
 				return res;
 			} else {
 				// TODO(bill): THIS IS HACK BUT IT WORKS FOR WHAT I NEED
@@ -909,7 +909,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
 						new_values[i] = LLVMConstNull(LLVMTypeOf(old_value));
 					}
 				}
-				LLVMValueRef constant_value = llvm_const_named_struct(struct_type, new_values, cast(unsigned)value_count);
+				LLVMValueRef constant_value = llvm_const_named_struct_internal(struct_type, new_values, cast(unsigned)value_count);
 
 				GB_ASSERT(is_local);
 				lbProcedure *p = m->curr_procedure;
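
The rename makes the two variants' contracts explicit: the `lbModule` wrapper takes one value per source-level field and synthesizes the padding elements itself, while `_internal` requires the caller to supply every LLVM element, padding slots included, or the `GB_ASSERT_MSG` above fires. A hedged sketch of the distinction (the value arrays and counts are illustrative, set up as in `lb_const_value`):

	LLVMTypeRef llvm_t = lb_type(m, t);
	// Wrapper: one value per Odin struct field; padding is filled in.
	LLVMValueRef a = llvm_const_named_struct(m, t, field_values, field_count);
	// Raw variant: value_count must equal the LLVM element count exactly.
	unsigned elem_count = LLVMCountStructElementTypes(llvm_t);
	LLVMValueRef b = llvm_const_named_struct_internal(llvm_t, padded_values, elem_count);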

+ 10 - 8
src/llvm_backend_general.cpp

@@ -1700,18 +1700,18 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
 			auto fields = array_make<LLVMTypeRef>(temporary_allocator(), 0, type->Struct.fields.count*2 + 1);
 			
 			i64 padding_offset = 0;
-			for_array(i, type->Struct.fields) {
-				GB_ASSERT(type->Struct.offsets != nullptr);
-				
-				Entity *field = type->Struct.fields[i];
-				i64 padding = type->Struct.offsets[i] - padding_offset;
+			// auto field_indices = struct_fields_index_by_increasing_offset(type, temporary_allocator());
+			// for (i32 field_index : field_indices) {
+			for (isize field_index = 0; field_index < type->Struct.fields.count; field_index++) {
+				Entity *field = type->Struct.fields[field_index];
+				i64 padding = type->Struct.offsets[field_index] - padding_offset;
 
 				if (padding != 0) {
 					LLVMTypeRef padding_type = lb_type_padding_filler(m, padding, type_align_of(field->type));
 					array_add(&fields, padding_type);					
 				}
 				
-				field_remapping[i] = cast(i32)fields.count;
+				field_remapping[field_index] = cast(i32)fields.count;
 				array_add(&fields, lb_type(m, field->type));
 				
 				if (!type->Struct.is_packed) {
@@ -1720,7 +1720,8 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
 				padding_offset += type_size_of(field->type);
 			}
 			
-			i64 end_padding = type_size_of(type)-padding_offset;
+			i64 full_type_size = type_size_of(type);
+			i64 end_padding = full_type_size-padding_offset;
 			if (end_padding > 0) {
 				array_add(&fields, lb_type_padding_filler(m, end_padding, 1));
 			}
@@ -1731,7 +1732,8 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
 			
 			LLVMTypeRef struct_type = LLVMStructTypeInContext(ctx, fields.data, cast(unsigned)fields.count, type->Struct.is_packed);
 			map_set(&m->struct_field_remapping, hash_pointer(struct_type), field_remapping);
-			map_set(&m->struct_field_remapping, hash_pointer(type), field_remapping);
+			map_set(&m->struct_field_remapping, hash_pointer(type), field_remapping);			
+			GB_ASSERT(lb_sizeof(struct_type) == full_type_size);
 			return struct_type;
 		}
 		break;
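
A worked trace of the padding loop above, for a hypothetical non-packed `struct { a: u8, b: i32 }` with offsets `{0, 4}` and size 8 (the body of the `is_packed` branch is elided in this hunk, so the trace assumes it resynchronizes `padding_offset` to the field's offset):

	// field a: padding = 0 - 0 = 0      fields = [ i8 ]                 remap[a] = 0
	// field b: padding = 4 - 1 = 3      fields = [ i8, [3 x i8], i32 ]  remap[b] = 2
	// end padding = 8 - (4 + 4) = 0     nothing appended
	// The new GB_ASSERT then checks lb_sizeof(struct_type) == 8.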

+ 1 - 1
src/llvm_backend_type.cpp

@@ -171,7 +171,7 @@ void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup type_info da
 			LLVMConstInBoundsGEP(lb_global_type_info_data_ptr(m).value, indices, gb_count_of(indices)),
 			LLVMConstInt(lb_type(m, t_int), type->Array.count, true),
 		};
-		LLVMValueRef slice = llvm_const_named_struct(llvm_addr_type(global_type_table), values, gb_count_of(values));
+		LLVMValueRef slice = llvm_const_named_struct_internal(llvm_addr_type(global_type_table), values, gb_count_of(values));
 
 		LLVMSetInitializer(global_type_table.value, slice);
 	}

+ 1 - 1
src/llvm_backend_utility.cpp

@@ -139,7 +139,7 @@ lbValue lb_emit_string(lbProcedure *p, lbValue str_elem, lbValue str_len) {
 		};
 		lbValue res = {};
 		res.type = t_string;
-		res.value = llvm_const_named_struct(lb_type(p->module, t_string), values, gb_count_of(values));
+		res.value = llvm_const_named_struct(p->module, t_string, values, gb_count_of(values));
 		return res;
 	} else {
 		lbAddr res = lb_add_local_generated(p, t_string, false);

+ 48 - 0
src/types.cpp

@@ -679,6 +679,7 @@ bool are_types_identical(Type *x, Type *y);
 bool is_type_pointer(Type *t);
 bool is_type_slice(Type *t);
 bool is_type_integer(Type *t);
+bool type_set_offsets(Type *t);
 
 void init_type_mutex(void) {
 	mutex_init(&g_type_mutex);
@@ -2758,6 +2759,53 @@ Selection lookup_field_with_selection(Type *type_, String field_name, bool is_ty
 	return sel;
 }
 
+GB_COMPARE_PROC(struct_field_cmp_by_offset) {
+	i64 x = *(i64 const *)(a);
+	i64 y = *(i64 const *)(b);
+	if (x < y) {
+		return -1;
+	} else if (x > y) {
+		return +1;
+	}
+	return 0;
+}
+
+
+Slice<i32> struct_fields_index_by_increasing_offset(Type *type, gbAllocator allocator) {
+	type = base_type(type);
+	GB_ASSERT(type->kind == Type_Struct);
+	type_set_offsets(type);
+	GB_ASSERT(type->Struct.offsets != nullptr);
+	auto indices = slice_make<i32>(allocator, type->Struct.fields.count);
+	
+	i64 prev_offset = 0;
+	bool is_ordered = true;
+	for_array(i, indices) {
+		indices.data[i] = cast(i32)i;
+		i64 offset = type->Struct.offsets[i];
+		if (is_ordered && prev_offset > offset) {
+			is_ordered = false;
+		}
+		prev_offset = offset;
+	}
+	if (!is_ordered) {
+		isize n = indices.count;
+		for (isize i = 0; i < n-1; i++) {
+			for (isize j = 0; j < n-i-1; j++) {
+				isize a = j;
+				isize b = j+1;
+				if (type->Struct.offsets[a] > type->Struct.offsets[b]) {
+					gb_swap(i32, indices[a], indices[b]);
+				}
+			}
+		}
+	}
+	
+	return indices;
+}
+
+
+
 
 // IMPORTANT TODO(bill): SHould this TypePath code be removed since type cycle checking is handled much earlier on?
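
A sketch of the intended future use of `struct_fields_index_by_increasing_offset`, mirroring the commented-out lines in `lb_type_internal` above (the loop body is illustrative; the range-for works thanks to the `Slice<T>` `begin`/`end` overloads added in array.cpp):

	auto field_indices = struct_fields_index_by_increasing_offset(type, temporary_allocator());
	for (i32 field_index : field_indices) {
		Entity *field = type->Struct.fields[field_index];
		// Fields arrive in increasing memory-offset order here, even
		// when that differs from declaration order.
	}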