Browse Source

Remove check on array/slice/dynamic element size

Ginger Bill 8 years ago
parent
commit 206a3e093c
5 changed files with 56 additions and 32 deletions
  1. 1 0
      code/demo.odin
  2. 1 1
      core/math.odin
  3. 2 1
      src/check_decl.c
  4. 15 7
      src/check_expr.c
  5. 37 23
      src/types.c

+ 1 - 0
code/demo.odin

@@ -1,4 +1,5 @@
 #import "fmt.odin";
+#import "math.odin";
 
 main :: proc() {
 	immutable program := "+ + * - /";

+ 1 - 1
core/math.odin

@@ -277,7 +277,7 @@ mat4_rotate :: proc(v: Vec3, angle_radians: f32) -> Mat4 {
 	s := sin(angle_radians);
 
 	a := norm(v);
-	t := a * Vec3{1-c};
+	t := a * (1-c);
 
 	rot := mat4_identity();
 

+ 2 - 1
src/check_decl.c

@@ -127,7 +127,8 @@ void check_init_constant(Checker *c, Entity *e, Operand *operand) {
 
 void check_type_decl(Checker *c, Entity *e, AstNode *type_expr, Type *def) {
 	GB_ASSERT(e->type == NULL);
-	Type *named = make_type_named(c->allocator, e->token.string, NULL, e);
+	String name = e->token.string;
+	Type *named = make_type_named(c->allocator, name, NULL, e);
 	named->Named.type_name = e;
 	if (def != NULL && def->kind == Type_Named) {
 		def->Named.base = named;

+ 15 - 7
src/check_expr.c

@@ -553,7 +553,7 @@ void check_struct_type(Checker *c, Type *struct_type, AstNode *node) {
 	struct_type->Record.field_count         = field_count;
 	struct_type->Record.names = make_names_field_for_record(c, c->context.scope);
 
-	if (!st->is_packed && !st->is_ordered) {
+	if (false && !st->is_packed && !st->is_ordered) {
 		// NOTE(bill): Reorder fields for reduced size/performance
 
 		Entity **reordered_fields = gb_alloc_array(c->allocator, Entity *, field_count);
@@ -574,6 +574,10 @@ void check_struct_type(Checker *c, Type *struct_type, AstNode *node) {
 		struct_type->Record.fields = reordered_fields;
 	}
 
+	{
+		// i64 size = type_size_of(c->allocator, struct_type);
+	}
+
 	type_set_offsets(c->allocator, struct_type);
 
 	if (st->align != NULL) {
@@ -657,8 +661,6 @@ void check_union_type(Checker *c, Type *union_type, AstNode *node) {
 	union_type->Record.are_offsets_set     = false;
 	union_type->Record.is_ordered          = true;
 
-
-
 	for_array(i, ut->variants) {
 		AstNode *variant = ut->variants.e[i];
 		if (variant->kind != AstNode_UnionField) {
@@ -1601,21 +1603,25 @@ bool check_type_extra_internal(Checker *c, AstNode *e, Type **type, Type *named_
 				error_node(at->count, ".. can only be used in conjuction with compound literals");
 				count = 0;
 			}
+#if 0
 			i64 esz = type_size_of(c->allocator, elem);
-			if (esz <= 0) {
+			if (esz == 0) {
 				gbString str = type_to_string(elem);
 				error_node(at->elem, "Zero sized element type `%s` is not allowed", str);
 				gb_string_free(str);
 			}
+#endif
 			*type = make_type_array(c->allocator, elem, count);
 		} else {
 			Type *elem = check_type(c, at->elem);
+#if 0
 			i64 esz = type_size_of(c->allocator, elem);
-			if (esz <= 0) {
+			if (esz == 0) {
 				gbString str = type_to_string(elem);
 				error_node(at->elem, "Zero sized element type `%s` is not allowed", str);
 				gb_string_free(str);
 			}
+#endif
 			*type = make_type_slice(c->allocator, elem);
 		}
 		return true;
@@ -1624,11 +1630,13 @@ bool check_type_extra_internal(Checker *c, AstNode *e, Type **type, Type *named_
 	case_ast_node(dat, DynamicArrayType, e);
 		Type *elem = check_type_extra(c, dat->elem, NULL);
 		i64 esz = type_size_of(c->allocator, elem);
-		if (esz <= 0) {
+#if 0
+		if (esz == 0) {
 			gbString str = type_to_string(elem);
 			error_node(dat->elem, "Zero sized element type `%s` is not allowed", str);
 			gb_string_free(str);
 		}
+#endif
 		*type = make_type_dynamic_array(c->allocator, elem);
 		return true;
 	case_end;
@@ -1735,7 +1743,7 @@ Type *check_type_extra(Checker *c, AstNode *e, Type *named_type) {
 		type = t_invalid;
 	}
 
-	if (is_type_named(type)) {
+	if (type->kind == Type_Named) {
 		if (type->Named.base == NULL) {
 			gbString name = type_to_string(type);
 			error_node(e, "Invalid type definition of %s", name);

+ 37 - 23
src/types.c

@@ -95,6 +95,7 @@ typedef struct TypeRecord {
 
 	i64 *    offsets;
 	bool     are_offsets_set;
+	bool     are_offsets_being_processed;
 	bool     is_packed;
 	bool     is_ordered;
 
@@ -1434,30 +1435,35 @@ void type_path_free(TypePath *tp) {
 	array_free(&tp->path);
 }
 
+void type_path_print_illegal_cycle(TypePath *tp, isize start_index) {
+	GB_ASSERT(tp != NULL);
+
+	GB_ASSERT(start_index < tp->path.count);
+	Type *t = tp->path.e[start_index];
+	GB_ASSERT(t != NULL);
+
+	GB_ASSERT_MSG(is_type_named(t), "%s", type_to_string(t));
+	Entity *e = t->Named.type_name;
+	error(e->token, "Illegal declaration cycle of `%.*s`", LIT(t->Named.name));
+	// NOTE(bill): Print cycle, if it's deep enough
+	for (isize j = start_index; j < tp->path.count; j++) {
+		Type *t = tp->path.e[j];
+		GB_ASSERT_MSG(is_type_named(t), "%s", type_to_string(t));
+		Entity *e = t->Named.type_name;
+		error(e->token, "\t%.*s refers to", LIT(t->Named.name));
+	}
+	// NOTE(bill): This will only print if the path count > 1
+	error(e->token, "\t%.*s", LIT(t->Named.name));
+	tp->failure = true;
+	t->failure = true;
+}
+
 TypePath *type_path_push(TypePath *tp, Type *t) {
 	GB_ASSERT(tp != NULL);
 
-	for (isize i = 1; i < tp->path.count; i++) {
+	for (isize i = 0; i < tp->path.count; i++) {
 		if (tp->path.e[i] == t) {
-			// TODO(bill):
-			GB_ASSERT_MSG(is_type_named(t), "%s", type_to_string(t));
-			Entity *e = t->Named.type_name;
-			error(e->token, "Illegal declaration cycle of `%.*s`", LIT(t->Named.name));
-			// NOTE(bill): Print cycle, if it's deep enough
-			for (isize j = i; j < tp->path.count; j++) {
-				Type *t = tp->path.e[j];
-				GB_ASSERT_MSG(is_type_named(t), "%s", type_to_string(t));
-				Entity *e = t->Named.type_name;
-				error(e->token, "\t%.*s refers to", LIT(t->Named.name));
-			}
-			// NOTE(bill): This will only print if the path count > 1
-			error(e->token, "\t%.*s", LIT(t->Named.name));
-			tp->failure = true;
-			t->failure = true;
-
-			// NOTE(bill): Just quit immediately
-			// TODO(bill): Try and solve this gracefully
-			// gb_exit(1);
+			type_path_print_illegal_cycle(tp, i);
 		}
 	}
 
@@ -1681,13 +1687,14 @@ i64 *type_set_offsets_of(gbAllocator allocator, Entity **fields, isize field_cou
 	i64 curr_offset = 0;
 	if (is_packed) {
 		for (isize i = 0; i < field_count; i++) {
+			i64 size = type_size_of(allocator, fields[i]->type);
 			offsets[i] = curr_offset;
-			curr_offset += type_size_of(allocator, fields[i]->type);
+			curr_offset += size;
 		}
 	} else {
 		for (isize i = 0; i < field_count; i++) {
-			i64 align = type_align_of(allocator, fields[i]->type);
-			i64 size  = type_size_of(allocator, fields[i]->type);
+			i64 align = max(type_align_of(allocator, fields[i]->type), 1);
+			i64 size  = max(type_size_of(allocator, fields[i]->type), 0);
 			curr_offset = align_formula(curr_offset, align);
 			offsets[i] = curr_offset;
 			curr_offset += size;
@@ -1700,18 +1707,21 @@ bool type_set_offsets(gbAllocator allocator, Type *t) {
 	t = base_type(t);
 	if (is_type_struct(t)) {
 		if (!t->Record.are_offsets_set) {
+			t->Record.are_offsets_being_processed = true;
 			t->Record.offsets = type_set_offsets_of(allocator, t->Record.fields, t->Record.field_count, t->Record.is_packed);
 			t->Record.are_offsets_set = true;
 			return true;
 		}
 	} else if (is_type_union(t)) {
 		if (!t->Record.are_offsets_set) {
+			t->Record.are_offsets_being_processed = true;
 			t->Record.offsets = type_set_offsets_of(allocator, t->Record.fields, t->Record.field_count, false);
 			t->Record.are_offsets_set = true;
 			return true;
 		}
 	}  else if (is_type_tuple(t)) {
 		if (!t->Tuple.are_offsets_set) {
+			t->Record.are_offsets_being_processed = true;
 			t->Tuple.offsets = type_set_offsets_of(allocator, t->Tuple.variables, t->Tuple.variable_count, false);
 			t->Tuple.are_offsets_set = true;
 			return true;
@@ -1843,6 +1853,10 @@ i64 type_size_of_internal(gbAllocator allocator, Type *t, TypePath *path) {
 			if (path->failure) {
 				return FAILURE_SIZE;
 			}
+			if (t->Record.are_offsets_being_processed && t->Record.offsets == NULL) {
+				type_path_print_illegal_cycle(path, path->path.count-1);
+				return FAILURE_SIZE;
+			}
 			type_set_offsets(allocator, t);
 			i64 size = t->Record.offsets[count-1] + type_size_of_internal(allocator, t->Record.fields[count-1]->type, path);
 			return align_formula(size, align);