
Update alignment rules for `matrix` types as a compromise to keep zero padding

gingerBill, 4 years ago
commit 306bdf8869
6 changed files with 147 additions and 107 deletions
  1. src/check_builtin.cpp (+2, -2)
  2. src/check_type.cpp (+6, -6)
  3. src/llvm_backend_expr.cpp (+19, -4)
  4. src/llvm_backend_general.cpp (+6, -2)
  5. src/llvm_backend_utility.cpp (+1, -1)
  6. src/types.cpp (+113, -92)

src/check_builtin.cpp (+2, -2)

@@ -2083,8 +2083,8 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
 		}
 		
 		i64 max_count = xt->Array.count*yt->Array.count;
-		if (max_count > MAX_MATRIX_ELEMENT_COUNT) {
-			error(call, "Product of the array lengths exceed the maximum matrix element count, got %d, expected a maximum of %d", cast(int)max_count, MAX_MATRIX_ELEMENT_COUNT);
+		if (max_count > MATRIX_ELEMENT_COUNT_MAX) {
+		error(call, "Product of the array lengths exceeds the maximum matrix element count, got %d, expected a maximum of %d", cast(int)max_count, MATRIX_ELEMENT_COUNT_MAX);
 			return false;
 		}
 		

src/check_type.cpp (+6, -6)

@@ -2226,21 +2226,21 @@ void check_matrix_type(CheckerContext *ctx, Type **type, Ast *node) {
 		generic_column = column.type;
 	}
 	
-	if (row_count < MIN_MATRIX_ELEMENT_COUNT && generic_row == nullptr) {
+	if (row_count < MATRIX_ELEMENT_COUNT_MIN && generic_row == nullptr) {
 		gbString s = expr_to_string(row.expr);
-		error(row.expr, "Invalid matrix row count, expected %d+ rows, got %s", MIN_MATRIX_ELEMENT_COUNT, s);
+		error(row.expr, "Invalid matrix row count, expected %d+ rows, got %s", MATRIX_ELEMENT_COUNT_MIN, s);
 		gb_string_free(s);
 	}
 	
-	if (column_count < MIN_MATRIX_ELEMENT_COUNT && generic_column == nullptr) {
+	if (column_count < MATRIX_ELEMENT_COUNT_MIN && generic_column == nullptr) {
 		gbString s = expr_to_string(column.expr);
-		error(column.expr, "Invalid matrix column count, expected %d+ rows, got %s", MIN_MATRIX_ELEMENT_COUNT, s);
+		error(column.expr, "Invalid matrix column count, expected %d+ columns, got %s", MATRIX_ELEMENT_COUNT_MIN, s);
 		gb_string_free(s);
 	}
 	
-	if (row_count*column_count > MAX_MATRIX_ELEMENT_COUNT) {
+	if (row_count*column_count > MATRIX_ELEMENT_COUNT_MAX) {
 		i64 element_count = row_count*column_count;
-		error(column.expr, "Matrix types are limited to a maximum of %d elements, got %lld", MAX_MATRIX_ELEMENT_COUNT, cast(long long)element_count);
+		error(column.expr, "Matrix types are limited to a maximum of %d elements, got %lld", MATRIX_ELEMENT_COUNT_MAX, cast(long long)element_count);
 	}
 	
 	if (!is_type_valid_for_matrix_elems(elem)) {
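
For reference, a worked pass through the renamed limits (the 4x5 shape is a hypothetical example, not taken from this commit):

	// matrix[4, 5]f32: row_count = 4, column_count = 5
	// 4 >= MATRIX_ELEMENT_COUNT_MIN and 5 >= MATRIX_ELEMENT_COUNT_MIN, so no per-dimension error
	// row_count*column_count = 20 > MATRIX_ELEMENT_COUNT_MAX (16)
	// => "Matrix types are limited to a maximum of 16 elements, got 20"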

src/llvm_backend_expr.cpp (+19, -4)

@@ -511,10 +511,16 @@ LLVMValueRef lb_matrix_to_vector(lbProcedure *p, lbValue matrix) {
 	unsigned total_count = cast(unsigned)matrix_type_total_internal_elems(mt);
 	LLVMTypeRef total_matrix_type = LLVMVectorType(elem_type, total_count);
 	
+#if 1
 	LLVMValueRef ptr = lb_address_from_load_or_generate_local(p, matrix).value;
 	LLVMValueRef matrix_vector_ptr = LLVMBuildPointerCast(p->builder, ptr, LLVMPointerType(total_matrix_type, 0), "");
 	LLVMValueRef matrix_vector = LLVMBuildLoad(p->builder, matrix_vector_ptr, "");
+	LLVMSetAlignment(matrix_vector, cast(unsigned)type_align_of(mt));
 	return matrix_vector;
+#else
+	LLVMValueRef matrix_vector = LLVMBuildBitCast(p->builder, matrix.value, total_matrix_type, "");
+	return matrix_vector;
+#endif
 }
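
The new LLVMSetAlignment call matters because the load goes through a pointer cast to a flat vector type: left unannotated, LLVM assumes the vector type's ABI alignment, which can now exceed the matrix's actual alignment. A minimal IR sketch under assumed sizes (a matrix[3,3]f32 that is only 4-byte aligned under the new rules):

	%vec_ptr = bitcast %Matrix* %m to <9 x float>*
	%vec     = load <9 x float>, <9 x float>* %vec_ptr, align 4
	; without the explicit "align 4", the load would default to the vector type's
	; larger natural alignment and could be lowered to aligned SIMD loads that fault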
 
 LLVMValueRef lb_matrix_trimmed_vector_mask(lbProcedure *p, Type *mt) {
@@ -524,7 +530,6 @@ LLVMValueRef lb_matrix_trimmed_vector_mask(lbProcedure *p, Type *mt) {
 	unsigned stride = cast(unsigned)matrix_type_stride_in_elems(mt);
 	unsigned row_count = cast(unsigned)mt->Matrix.row_count;
 	unsigned column_count = cast(unsigned)mt->Matrix.column_count;
-	
 	unsigned mask_elems_index = 0;
 	auto mask_elems = slice_make<LLVMValueRef>(permanent_allocator(), row_count*column_count);
 	for (unsigned j = 0; j < column_count; j++) {
@@ -540,7 +545,17 @@ LLVMValueRef lb_matrix_trimmed_vector_mask(lbProcedure *p, Type *mt) {
 
 LLVMValueRef lb_matrix_to_trimmed_vector(lbProcedure *p, lbValue m) {
 	LLVMValueRef vector = lb_matrix_to_vector(p, m);
-	LLVMValueRef mask = lb_matrix_trimmed_vector_mask(p, m.type);
+	
+	Type *mt = base_type(m.type);
+	GB_ASSERT(mt->kind == Type_Matrix);
+	
+	unsigned stride = cast(unsigned)matrix_type_stride_in_elems(mt);
+	unsigned row_count = cast(unsigned)mt->Matrix.row_count;
+	if (stride == row_count) {
+		return vector;
+	}
+	
+	LLVMValueRef mask = lb_matrix_trimmed_vector_mask(p, mt);
 	LLVMValueRef trimmed_vector = LLVMBuildShuffleVector(p->builder, vector, LLVMGetUndef(LLVMTypeOf(vector)), mask, "");
 	return trimmed_vector;
 }
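
To illustrate what the early return skips: the mask built by lb_matrix_trimmed_vector_mask presumably selects element j*stride + i for each column j and row i, shuffling out the padding lanes. A sketch with assumed sizes:

	// row_count = 3, column_count = 2, stride = 4 (one padding element per column):
	//   mask = {0, 1, 2,  4, 5, 6}  // indices 3 and 7 (padding) are dropped
	// Under this commit's zero-padding layout, stride == row_count, so the new
	// early return avoids building the shuffle at all.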
@@ -791,7 +806,7 @@ lbValue lb_emit_matrix_mul_vector(lbProcedure *p, lbValue lhs, lbValue rhs, Type
 		
 		for (unsigned row_index = 0; row_index < column_count; row_index++) {
 			LLVMValueRef value = lb_emit_struct_ev(p, rhs, row_index).value;
-			LLVMValueRef row = llvm_splat(p, value, row_count);
+			LLVMValueRef row = llvm_vector_broadcast(p, value, row_count);
 			v_rows[row_index] = row;
 		}
 		
@@ -866,7 +881,7 @@ lbValue lb_emit_vector_mul_matrix(lbProcedure *p, lbValue lhs, lbValue rhs, Type
 		
 		for (unsigned column_index = 0; column_index < row_count; column_index++) {
 			LLVMValueRef value = lb_emit_struct_ev(p, lhs, column_index).value;
-			LLVMValueRef row = llvm_splat(p, value, column_count);
+			LLVMValueRef row = llvm_vector_broadcast(p, value, column_count);
 			v_rows[column_index] = row;
 		}
 		

src/llvm_backend_general.cpp (+6, -2)

@@ -512,8 +512,7 @@ void lb_emit_slice_bounds_check(lbProcedure *p, Token token, lbValue low, lbValu
 	}
 }
 
-bool lb_try_update_alignment(lbValue ptr, unsigned alignment)  {
-	LLVMValueRef addr_ptr = ptr.value;
+bool lb_try_update_alignment(LLVMValueRef addr_ptr, unsigned alignment) {
 	if (LLVMIsAGlobalValue(addr_ptr) || LLVMIsAAllocaInst(addr_ptr) || LLVMIsALoadInst(addr_ptr)) {
 		if (LLVMGetAlignment(addr_ptr) < alignment) {
 			if (LLVMIsAAllocaInst(addr_ptr) || LLVMIsAGlobalValue(addr_ptr)) {
@@ -525,6 +524,11 @@ bool lb_try_update_alignment(lbValue ptr, unsigned alignment)  {
 	return false;
 }
 
+bool lb_try_update_alignment(lbValue ptr, unsigned alignment) {
+	return lb_try_update_alignment(ptr.value, alignment);
+}
+
+
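
The split lets callers that hold only a raw LLVMValueRef, rather than an lbValue, request an alignment bump on an alloca, global, or load. A hypothetical call site (reusing the matrix_vector load from llvm_backend_expr.cpp above, which currently calls LLVMSetAlignment directly):

	lb_try_update_alignment(matrix_vector, cast(unsigned)type_align_of(mt));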
 bool lb_try_vector_cast(lbModule *m, lbValue ptr, LLVMTypeRef *vector_type_) {
 	Type *array_type = base_type(type_deref(ptr.type));
 	GB_ASSERT(is_type_array_like(array_type));

src/llvm_backend_utility.cpp (+1, -1)

@@ -1526,7 +1526,7 @@ LLVMValueRef llvm_mask_zero(lbModule *m, unsigned count) {
 	return LLVMConstNull(LLVMVectorType(lb_type(m, t_u32), count));
 }
 
-LLVMValueRef llvm_splat(lbProcedure *p, LLVMValueRef value, unsigned count) {
+LLVMValueRef llvm_vector_broadcast(lbProcedure *p, LLVMValueRef value, unsigned count) {
 	GB_ASSERT(count > 0);
 	if (LLVMIsConstant(value)) {
 		LLVMValueRef single = LLVMConstVector(&value, 1);
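
The rename is behavior-preserving; "broadcast" describes what the helper does: build a <count x T> vector with every lane equal to value. For constants it starts from a one-element LLVMConstVector as shown; for runtime values the standard lowering (a sketch of the presumed non-constant path, using the zero mask provided by llvm_mask_zero above) is:

	%single = insertelement <4 x float> undef, float %v, i64 0
	%splat  = shufflevector <4 x float> %single, <4 x float> undef, <4 x i32> zeroinitializer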

src/types.cpp (+113, -92)

@@ -360,8 +360,8 @@ enum TypeInfoFlag : u32 {
 
 
 enum : int {
-	MIN_MATRIX_ELEMENT_COUNT = 1,
-	MAX_MATRIX_ELEMENT_COUNT = 16,
+	MATRIX_ELEMENT_COUNT_MIN = 1,
+	MATRIX_ELEMENT_COUNT_MAX = 16,
 };
 
 
@@ -700,6 +700,74 @@ bool is_type_pointer(Type *t);
 bool is_type_slice(Type *t);
 bool is_type_integer(Type *t);
 bool type_set_offsets(Type *t);
+Type *base_type(Type *t);
+
+i64 type_size_of_internal(Type *t, TypePath *path);
+i64 type_align_of_internal(Type *t, TypePath *path);
+
+
+// IMPORTANT TODO(bill): Should this TypePath code be removed since type cycle checking is handled much earlier on?
+
+struct TypePath {
+	Array<Entity *> path; // Entity_TypeName;
+	bool failure;
+};
+
+
+void type_path_init(TypePath *tp) {
+	tp->path.allocator = heap_allocator();
+}
+
+void type_path_free(TypePath *tp) {
+	array_free(&tp->path);
+}
+
+void type_path_print_illegal_cycle(TypePath *tp, isize start_index) {
+	GB_ASSERT(tp != nullptr);
+
+	GB_ASSERT(start_index < tp->path.count);
+	Entity *e = tp->path[start_index];
+	GB_ASSERT(e != nullptr);
+	error(e->token, "Illegal type declaration cycle of `%.*s`", LIT(e->token.string));
+	// NOTE(bill): Print cycle, if it's deep enough
+	for (isize j = start_index; j < tp->path.count; j++) {
+		Entity *e = tp->path[j];
+		error(e->token, "\t%.*s refers to", LIT(e->token.string));
+	}
+	// NOTE(bill): This will only print if the path count > 1
+	error(e->token, "\t%.*s", LIT(e->token.string));
+	tp->failure = true;
+	e->type->failure = true;
+	base_type(e->type)->failure = true;
+}
+
+bool type_path_push(TypePath *tp, Type *t) {
+	GB_ASSERT(tp != nullptr);
+	if (t->kind != Type_Named) {
+		return false;
+	}
+	Entity *e = t->Named.type_name;
+
+	for (isize i = 0; i < tp->path.count; i++) {
+		Entity *p = tp->path[i];
+		if (p == e) {
+			type_path_print_illegal_cycle(tp, i);
+		}
+	}
+
+	array_add(&tp->path, e);
+	return true;
+}
+
+void type_path_pop(TypePath *tp) {
+	if (tp != nullptr && tp->path.count > 0) {
+		array_pop(&tp->path);
+	}
+}
+
+
+#define FAILURE_SIZE      0
+#define FAILURE_ALIGNMENT 0
 
 void init_type_mutex(void) {
 	mutex_init(&g_type_mutex);
@@ -1251,6 +1319,42 @@ bool is_type_matrix(Type *t) {
 	return t->kind == Type_Matrix;
 }
 
+i64 matrix_align_of(Type *t, struct TypePath *tp) {
+	t = base_type(t);
+	GB_ASSERT(t->kind == Type_Matrix);
+	
+	Type *elem = t->Matrix.elem;
+	i64 row_count = gb_max(t->Matrix.row_count, 1);
+
+	bool pop = type_path_push(tp, elem);
+	if (tp->failure) {
+		return FAILURE_ALIGNMENT;
+	}
+
+	i64 elem_align = type_align_of_internal(elem, tp);
+	if (pop) type_path_pop(tp);
+	
+	i64 elem_size = type_size_of(elem);
+	
+
+	// NOTE(bill, 2021-10-25): The alignment strategy here is to have zero padding.
+	// It would be better for performance to pad each column so that each column
+	// could be maximally aligned, but as a compromise, having no padding is
+	// beneficial to third-party libraries that assume no padding.
+	
+	i64 total_expected_size = row_count*t->Matrix.column_count*elem_size;
+	// i64 min_alignment = prev_pow2(elem_align * row_count);
+	i64 min_alignment = prev_pow2(total_expected_size);
+	while ((total_expected_size % min_alignment) != 0) {
+		min_alignment >>= 1;
+	}
+	GB_ASSERT(min_alignment >= elem_align);
+	
+	i64 align = gb_min(min_alignment, build_context.max_align);
+	return align;
+}
+
+
 i64 matrix_type_stride_in_bytes(Type *t, struct TypePath *tp) {
 	t = base_type(t);
 	GB_ASSERT(t->kind == Type_Matrix);
@@ -1266,21 +1370,16 @@ i64 matrix_type_stride_in_bytes(Type *t, struct TypePath *tp) {
 	} else {
 		elem_size = type_size_of(t->Matrix.elem);
 	}
-	
 
 	i64 stride_in_bytes = 0;
 	
+	// NOTE(bill, 2021-10-25): The alignment strategy here is to have zero padding.
+	// It would be better for performance to pad each column so that each column
+	// could be maximally aligned, but as a compromise, having no padding is
+	// beneficial to third-party libraries that assume no padding.
 	i64 row_count = t->Matrix.row_count;
-#if 0	
-	if (row_count == 1) {
-		stride_in_bytes = elem_size;
-	} else {	
-		i64 matrix_alignment = type_align_of(t);
-		stride_in_bytes = align_formula(elem_size*row_count, matrix_alignment);
-	}
-#else
 	stride_in_bytes = elem_size*row_count;
-#endif
+	
 	t->Matrix.stride_in_bytes = stride_in_bytes;
 	return stride_in_bytes;
 }
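
A worked example of the new rule, assuming a matrix[3,3]f32 with elem_size = elem_align = 4 and build_context.max_align = 16:

	// total_expected_size = 3*3*4 = 36 bytes
	// min_alignment: prev_pow2(36) = 32; 36 % 32 != 0 -> 16; 36 % 16 != 0 -> 8; 36 % 8 != 0 -> 4; 36 % 4 == 0
	// align = gb_min(4, 16) = 4        (the removed rule below gave next_pow2(4*3) = 16)
	// stride_in_bytes = 4*3 = 12, so the matrix occupies exactly 36 bytes with zero padding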
@@ -2969,71 +3068,6 @@ Slice<i32> struct_fields_index_by_increasing_offset(gbAllocator allocator, Type
 
 
 
-
-// IMPORTANT TODO(bill): SHould this TypePath code be removed since type cycle checking is handled much earlier on?
-
-struct TypePath {
-	Array<Entity *> path; // Entity_TypeName;
-	bool failure;
-};
-
-
-void type_path_init(TypePath *tp) {
-	tp->path.allocator = heap_allocator();
-}
-
-void type_path_free(TypePath *tp) {
-	array_free(&tp->path);
-}
-
-void type_path_print_illegal_cycle(TypePath *tp, isize start_index) {
-	GB_ASSERT(tp != nullptr);
-
-	GB_ASSERT(start_index < tp->path.count);
-	Entity *e = tp->path[start_index];
-	GB_ASSERT(e != nullptr);
-	error(e->token, "Illegal type declaration cycle of `%.*s`", LIT(e->token.string));
-	// NOTE(bill): Print cycle, if it's deep enough
-	for (isize j = start_index; j < tp->path.count; j++) {
-		Entity *e = tp->path[j];
-		error(e->token, "\t%.*s refers to", LIT(e->token.string));
-	}
-	// NOTE(bill): This will only print if the path count > 1
-	error(e->token, "\t%.*s", LIT(e->token.string));
-	tp->failure = true;
-	e->type->failure = true;
-	base_type(e->type)->failure = true;
-}
-
-bool type_path_push(TypePath *tp, Type *t) {
-	GB_ASSERT(tp != nullptr);
-	if (t->kind != Type_Named) {
-		return false;
-	}
-	Entity *e = t->Named.type_name;
-
-	for (isize i = 0; i < tp->path.count; i++) {
-		Entity *p = tp->path[i];
-		if (p == e) {
-			type_path_print_illegal_cycle(tp, i);
-		}
-	}
-
-	array_add(&tp->path, e);
-	return true;
-}
-
-void type_path_pop(TypePath *tp) {
-	if (tp != nullptr && tp->path.count > 0) {
-		array_pop(&tp->path);
-	}
-}
-
-
-#define FAILURE_SIZE      0
-#define FAILURE_ALIGNMENT 0
-
-
 i64 type_size_of_internal (Type *t, TypePath *path);
 i64 type_align_of_internal(Type *t, TypePath *path);
 i64 type_size_of(Type *t);
@@ -3260,21 +3294,8 @@ i64 type_align_of_internal(Type *t, TypePath *path) {
 		return gb_clamp(next_pow2(type_size_of_internal(t, path)), 1, build_context.max_align);
 	}
 	
-	case Type_Matrix: {
-		Type *elem = t->Matrix.elem;
-		i64 row_count = gb_max(t->Matrix.row_count, 1);
-
-		bool pop = type_path_push(path, elem);
-		if (path->failure) {
-			return FAILURE_ALIGNMENT;
-		}
-		// elem align is used here rather than size as it make a little more sense
-		i64 elem_align = type_align_of_internal(elem, path);
-		if (pop) type_path_pop(path);
-		
-		i64 align = gb_min(next_pow2(elem_align * row_count), build_context.max_align);
-		return align;
-	}
+	case Type_Matrix: 
+		return matrix_align_of(t, path);
 
 	case Type_RelativePointer:
 		return type_align_of_internal(t->RelativePointer.base_integer, path);