Browse Source

Mock out more of the `ep` procedures

gingerBill 2 years ago
parent
commit
45b3ae31af
4 changed files with 316 additions and 53 deletions
  1. 9 0
      src/tilde_backend.hpp
  2. 18 27
      src/tilde_expr.cpp
  3. 234 4
      src/tilde_stmt.cpp
  4. 55 22
      src/types.cpp

+ 9 - 0
src/tilde_backend.hpp

@@ -211,6 +211,8 @@ gb_internal cgAddr cg_addr(cgValue const &value);
 gb_internal cgValue cg_const_value(cgProcedure *p, Type *type, ExactValue const &value);
 gb_internal cgValue cg_const_nil(cgProcedure *p, Type *type);
 
+gb_internal cgValue cg_flatten_value(cgProcedure *p, cgValue value);
+
 gb_internal void cg_build_stmt(cgProcedure *p, Ast *stmt);
 gb_internal void cg_build_stmt_list(cgProcedure *p, Slice<Ast *> const &stmts);
 gb_internal void cg_build_when_stmt(cgProcedure *p, AstWhenStmt *ws);
@@ -227,6 +229,7 @@ gb_internal cgValue cg_emit_load(cgProcedure *p, cgValue const &ptr, bool is_vol
 gb_internal void cg_emit_store(cgProcedure *p, cgValue dst, cgValue const &src, bool is_volatile=false);
 
 gb_internal cgAddr cg_add_local(cgProcedure *p, Type *type, Entity *e, bool zero_init);
+gb_internal cgValue cg_address_from_load_or_generate_local(cgProcedure *p, cgValue value);
 
 gb_internal cgValue cg_build_call_expr(cgProcedure *p, Ast *expr);
 
@@ -237,3 +240,9 @@ gb_internal TB_DebugType *cg_debug_type(cgModule *m, Type *type);
 gb_internal String cg_get_entity_name(cgModule *m, Entity *e);
 
 gb_internal cgValue cg_typeid(cgModule *m, Type *t);
+
+gb_internal cgValue cg_emit_ptr_offset(cgProcedure *p, cgValue ptr, cgValue index);
+gb_internal cgValue cg_emit_array_ep(cgProcedure *p, cgValue s, cgValue index);
+gb_internal cgValue cg_emit_array_epi(cgProcedure *p, cgValue s, i64 index);
+gb_internal cgValue cg_emit_struct_ep(cgProcedure *p, cgValue s, i64 index);
+gb_internal cgValue cg_emit_deep_field_gep(cgProcedure *p, cgValue e, Selection const &sel);

+ 18 - 27
src/tilde_expr.cpp

@@ -790,11 +790,6 @@ gb_internal cgAddr cg_build_addr_slice_expr(cgProcedure *p, Ast *expr) {
 
 			tb_inst_store(p->func, TB_TYPE_PTR, data_ptr, offset, cast(TB_CharUnits)build_context.ptr_size, false);
 			tb_inst_store(p->func, TB_TYPE_INT, len_ptr,  len,    cast(TB_CharUnits)build_context.int_size, false);
-
-			// LLVMValueRef gep0 = cg_emit_struct_ep(p, res.addr, 0).value;
-			// LLVMValueRef gep1 = cg_emit_struct_ep(p, res.addr, 1).value;
-			// LLVMBuildStore(p->builder, ptr, gep0);
-			// LLVMBuildStore(p->builder, len, gep1);
 			return res;
 		}
 	}
@@ -1256,10 +1251,8 @@ gb_internal cgAddr cg_build_addr_internal(cgProcedure *p, Ast *expr) {
 			cgAddr addr = cg_build_addr(p, se->expr);
 			if (addr.kind == cgAddr_Map) {
 				cgValue v = cg_addr_load(p, addr);
-				cgValue a = {}; GB_PANIC("TODO(bill): cg_address_from_load_or_generate_local");
-				// cgValue a = cg_address_from_load_or_generate_local(p, v);
-				GB_PANIC("TODO(bill): cg_emit_deep_field_gep");
-				// a = cg_emit_deep_field_gep(p, a, sel);
+				cgValue a = cg_address_from_load_or_generate_local(p, v);
+				a = cg_emit_deep_field_gep(p, a, sel);
 				return cg_addr(a);
 			} else if (addr.kind == cgAddr_Context) {
 				GB_ASSERT(sel.index.count > 0);
@@ -1271,34 +1264,33 @@ gb_internal cgAddr cg_build_addr_internal(cgProcedure *p, Ast *expr) {
 				return addr;
 			} else if (addr.kind == cgAddr_SoaVariable) {
 				cgValue index = addr.soa.index;
-				i32 first_index = sel.index[0];
+				i64 first_index = sel.index[0];
 				Selection sub_sel = sel;
 				sub_sel.index.data += 1;
 				sub_sel.index.count -= 1;
 
-				cgValue arr = {}; GB_PANIC("TODO(bill): cg_emit_struct_ep");
-				gb_unused(first_index);
-				// cgValue arr = cg_emit_struct_ep(p, addr.addr, first_index);
+				cgValue arr = cg_emit_struct_ep(p, addr.addr, first_index);
 
-				// Type *t = base_type(type_deref(addr.addr.type));
-				// GB_ASSERT(is_type_soa_struct(t));
+				Type *t = base_type(type_deref(addr.addr.type));
+				GB_ASSERT(is_type_soa_struct(t));
 
+				// TODO(bill): bounds checking for soa variable
 				// if (addr.soa.index_expr != nullptr && (!cg_is_const(addr.soa.index) || t->Struct.soa_kind != StructSoa_Fixed)) {
 				// 	cgValue len = cg_soa_struct_len(p, addr.addr);
 				// 	cg_emit_bounds_check(p, ast_token(addr.soa.index_expr), addr.soa.index, len);
 				// }
 
-				// cgValue item = {};
+				cgValue item = {};
 
-				// if (t->Struct.soa_kind == StructSoa_Fixed) {
-				// 	item = cg_emit_array_ep(p, arr, index);
-				// } else {
-				// 	item = cg_emit_ptr_offset(p, cg_emit_load(p, arr), index);
-				// }
-				// if (sub_sel.index.count > 0) {
-				// 	item = cg_emit_deep_field_gep(p, item, sub_sel);
-				// }
-				// return cg_addr(item);
+				if (t->Struct.soa_kind == StructSoa_Fixed) {
+					item = cg_emit_array_ep(p, arr, index);
+				} else {
+					item = cg_emit_ptr_offset(p, cg_emit_load(p, arr), index);
+				}
+				if (sub_sel.index.count > 0) {
+					item = cg_emit_deep_field_gep(p, item, sub_sel);
+				}
+				return cg_addr(item);
 			} else if (addr.kind == cgAddr_Swizzle) {
 				GB_ASSERT(sel.index.count > 0);
 				// NOTE(bill): just patch the index in place
@@ -1310,8 +1302,7 @@ gb_internal cgAddr cg_build_addr_internal(cgProcedure *p, Ast *expr) {
 			}
 
 			cgValue a = cg_addr_get_ptr(p, addr);
-			GB_PANIC("TODO(bill): cg_emit_deep_field_gep");
-			// a = cg_emit_deep_field_gep(p, a, sel);
+			a = cg_emit_deep_field_gep(p, a, sel);
 			return cg_addr(a);
 		}
 	case_end;

+ 234 - 4
src/tilde_stmt.cpp

@@ -253,13 +253,13 @@ gb_internal cgValue cg_addr_get_ptr(cgProcedure *p, cgAddr const &addr) {
 		}
 		offset = cg_emit_conv(p, offset, t_uintptr);
 
-		GB_PANIC("TODO(bill): cg_addr_get_ptr cgAddr_RelativePointer");
-		// cgValue absolute_ptr = cg_emit_arith(p, Token_Add, ptr, offset, t_uintptr);
-		// absolute_ptr = cg_emit_conv(p, absolute_ptr, rel_ptr->RelativePointer.pointer_type);
+		cgValue absolute_ptr = cg_emit_arith(p, Token_Add, ptr, offset, t_uintptr);
+		absolute_ptr = cg_emit_conv(p, absolute_ptr, rel_ptr->RelativePointer.pointer_type);
 
+		GB_PANIC("TODO(bill): cg_addr_get_ptr cgAddr_RelativePointer");
 		// cgValue cond = cg_emit_comp(p, Token_CmpEq, offset, cg_const_nil(p->module, rel_ptr->RelativePointer.base_integer));
 
-		// // NOTE(bill): nil check
+		// NOTE(bill): nil check
 		// cgValue nil_ptr = cg_const_nil(p->module, rel_ptr->RelativePointer.pointer_type);
 		// cgValue final_ptr = cg_emit_select(p, cond, nil_ptr, absolute_ptr);
 		// return final_ptr;
@@ -283,6 +283,219 @@ gb_internal cgValue cg_addr_get_ptr(cgProcedure *p, cgAddr const &addr) {
 	return addr.addr;
 }
 
+gb_internal cgValue cg_emit_ptr_offset(cgProcedure *p, cgValue ptr, cgValue index) {
+	GB_ASSERT(ptr.kind == cgValue_Value);
+	GB_ASSERT(index.kind == cgValue_Value);
+	GB_ASSERT(is_type_pointer(ptr.type) || is_type_multi_pointer(ptr.type));
+	GB_ASSERT(is_type_integer(index.type));
+
+	Type *elem = type_deref(ptr.type, true);
+	i64 stride = type_size_of(elem);
+	ptr.node = tb_inst_array_access(p->func, ptr.node, index.node, stride);
+	return ptr;
+}
+gb_internal cgValue cg_emit_array_ep(cgProcedure *p, cgValue s, cgValue index) {
+	GB_ASSERT(s.kind == cgValue_Value);
+	GB_ASSERT(index.kind == cgValue_Value);
+
+	Type *t = s.type;
+	GB_ASSERT_MSG(is_type_pointer(t), "%s", type_to_string(t));
+	Type *st = base_type(type_deref(t));
+	GB_ASSERT_MSG(is_type_array(st) || is_type_enumerated_array(st) || is_type_matrix(st), "%s", type_to_string(st));
+	GB_ASSERT_MSG(is_type_integer(core_type(index.type)), "%s", type_to_string(index.type));
+
+
+	Type *elem = base_array_type(st);
+	i64 stride = type_size_of(elem);
+	s.node = tb_inst_array_access(p->func, s.node, index.node, stride);
+	return s;
+}
+gb_internal cgValue cg_emit_array_epi(cgProcedure *p, cgValue s, i64 index) {
+	return cg_emit_array_ep(p, s, cg_const_int(p, t_int, index));
+}
+
+
+gb_internal cgValue cg_emit_struct_ep(cgProcedure *p, cgValue s, i64 index) {
+	s = cg_flatten_value(p, s);
+
+	GB_ASSERT(is_type_pointer(s.type));
+	Type *t = base_type(type_deref(s.type));
+	Type *result_type = nullptr;
+
+	if (is_type_relative_pointer(t)) {
+		s = cg_addr_get_ptr(p, cg_addr(s));
+	}
+
+	if (is_type_struct(t)) {
+		result_type = get_struct_field_type(t, index);
+	} else if (is_type_union(t)) {
+		GB_ASSERT(index == -1);
+		GB_PANIC("TODO(bill): cg_emit_union_tag_ptr");
+		// return cg_emit_union_tag_ptr(p, s);
+	} else if (is_type_tuple(t)) {
+		GB_PANIC("TODO(bill): cg_emit_tuple_ep");
+		// return cg_emit_tuple_ep(p, s, index);
+	} else if (is_type_complex(t)) {
+		Type *ft = base_complex_elem_type(t);
+		switch (index) {
+		case 0: result_type = ft; break;
+		case 1: result_type = ft; break;
+		}
+	} else if (is_type_quaternion(t)) {
+		Type *ft = base_complex_elem_type(t);
+		switch (index) {
+		case 0: result_type = ft; break;
+		case 1: result_type = ft; break;
+		case 2: result_type = ft; break;
+		case 3: result_type = ft; break;
+		}
+	} else if (is_type_slice(t)) {
+		switch (index) {
+		case 0: result_type = alloc_type_pointer(t->Slice.elem); break;
+		case 1: result_type = t_int; break;
+		}
+	} else if (is_type_string(t)) {
+		switch (index) {
+		case 0: result_type = t_u8_ptr; break;
+		case 1: result_type = t_int;    break;
+		}
+	} else if (is_type_any(t)) {
+		switch (index) {
+		case 0: result_type = t_rawptr; break;
+		case 1: result_type = t_typeid; break;
+		}
+	} else if (is_type_dynamic_array(t)) {
+		switch (index) {
+		case 0: result_type = alloc_type_pointer(t->DynamicArray.elem); break;
+		case 1: result_type = t_int;       break;
+		case 2: result_type = t_int;       break;
+		case 3: result_type = t_allocator; break;
+		}
+	} else if (is_type_map(t)) {
+		init_map_internal_types(t);
+		Type *itp = alloc_type_pointer(t_raw_map);
+		s = cg_emit_transmute(p, s, itp);
+
+		switch (index) {
+		case 0: result_type = get_struct_field_type(t_raw_map, 0); break;
+		case 1: result_type = get_struct_field_type(t_raw_map, 1); break;
+		case 2: result_type = get_struct_field_type(t_raw_map, 2); break;
+		}
+	} else if (is_type_array(t)) {
+		return cg_emit_array_epi(p, s, index);
+	} else if (is_type_relative_slice(t)) {
+		switch (index) {
+		case 0: result_type = t->RelativeSlice.base_integer; break;
+		case 1: result_type = t->RelativeSlice.base_integer; break;
+		}
+	} else if (is_type_soa_pointer(t)) {
+		switch (index) {
+		case 0: result_type = alloc_type_pointer(t->SoaPointer.elem); break;
+		case 1: result_type = t_int; break;
+		}
+	} else {
+		GB_PANIC("TODO(bill): struct_gep type: %s, %d", type_to_string(s.type), index);
+	}
+
+	GB_ASSERT_MSG(result_type != nullptr, "%s %d", type_to_string(t), index);
+
+	GB_PANIC("TODO(bill): cg_emit_struct_ep_internal");
+	// return cg_emit_struct_ep_internal(p, s, index, result_type);
+	return {};
+}
+
+gb_internal cgValue cg_emit_deep_field_gep(cgProcedure *p, cgValue e, Selection const &sel) {
+	GB_ASSERT(sel.index.count > 0);
+	Type *type = type_deref(e.type);
+
+	for_array(i, sel.index) {
+		i64 index = sel.index[i];
+		if (is_type_pointer(type)) {
+			type = type_deref(type);
+			e = cg_emit_load(p, e);
+		}
+		type = core_type(type);
+
+		if (type->kind == Type_SoaPointer) {
+			cgValue addr = cg_emit_struct_ep(p, e, 0);
+			cgValue index = cg_emit_struct_ep(p, e, 1);
+			addr = cg_emit_load(p, addr);
+			index = cg_emit_load(p, index);
+
+			i64 first_index = sel.index[0];
+			Selection sub_sel = sel;
+			sub_sel.index.data += 1;
+			sub_sel.index.count -= 1;
+
+			cgValue arr = cg_emit_struct_ep(p, addr, first_index);
+
+			Type *t = base_type(type_deref(addr.type));
+			GB_ASSERT(is_type_soa_struct(t));
+
+			if (t->Struct.soa_kind == StructSoa_Fixed) {
+				e = cg_emit_array_ep(p, arr, index);
+			} else {
+				e = cg_emit_ptr_offset(p, cg_emit_load(p, arr), index);
+			}
+		} else if (is_type_quaternion(type)) {
+			e = cg_emit_struct_ep(p, e, index);
+		} else if (is_type_raw_union(type)) {
+			type = get_struct_field_type(type, index);
+			GB_ASSERT(is_type_pointer(e.type));
+			e = cg_emit_transmute(p, e, alloc_type_pointer(type));
+		} else if (is_type_struct(type)) {
+			type = get_struct_field_type(type, index);
+			e = cg_emit_struct_ep(p, e, index);
+		} else if (type->kind == Type_Union) {
+			GB_ASSERT(index == -1);
+			type = t_type_info_ptr;
+			e = cg_emit_struct_ep(p, e, index);
+		} else if (type->kind == Type_Tuple) {
+			type = type->Tuple.variables[index]->type;
+			e = cg_emit_struct_ep(p, e, index);
+		} else if (type->kind == Type_Basic) {
+			switch (type->Basic.kind) {
+			case Basic_any: {
+				if (index == 0) {
+					type = t_rawptr;
+				} else if (index == 1) {
+					type = t_type_info_ptr;
+				}
+				e = cg_emit_struct_ep(p, e, index);
+				break;
+			}
+
+			case Basic_string:
+				e = cg_emit_struct_ep(p, e, index);
+				break;
+
+			default:
+				GB_PANIC("un-gep-able type %s", type_to_string(type));
+				break;
+			}
+		} else if (type->kind == Type_Slice) {
+			e = cg_emit_struct_ep(p, e, index);
+		} else if (type->kind == Type_DynamicArray) {
+			e = cg_emit_struct_ep(p, e, index);
+		} else if (type->kind == Type_Array) {
+			e = cg_emit_array_epi(p, e, index);
+		} else if (type->kind == Type_Map) {
+			e = cg_emit_struct_ep(p, e, index);
+		} else if (type->kind == Type_RelativePointer) {
+			e = cg_emit_struct_ep(p, e, index);
+		} else {
+			GB_PANIC("un-gep-able type %s", type_to_string(type));
+		}
+	}
+
+	return e;
+}
+
+
+
+
+
 
 
 
@@ -369,6 +582,23 @@ gb_internal cgAddr cg_add_local(cgProcedure *p, Type *type, Entity *e, bool zero
 	return addr;
 }
 
+gb_internal cgValue cg_address_from_load_or_generate_local(cgProcedure *p, cgValue value) {
+	switch (value.kind) {
+	case cgValue_Value:
+		if (value.node->type == TB_LOAD) {
+			TB_Node *ptr = value.node->inputs[1];
+			return cg_value(ptr, alloc_type_pointer(value.type));
+		}
+		break;
+	case cgValue_Addr:
+		return cg_value(value.node, alloc_type_pointer(value.type));
+	}
+
+	cgAddr res = cg_add_local(p, value.type, nullptr, false);
+	cg_addr_store(p, res, value);
+	return res.addr;
+}
+
 
 gb_internal void cg_scope_open(cgProcedure *p, Scope *scope) {
 	// TODO(bill): cg_scope_open

+ 55 - 22
src/types.cpp

@@ -725,7 +725,7 @@ struct TypePath;
 
 gb_internal i64      type_size_of   (Type *t);
 gb_internal i64      type_align_of  (Type *t);
-gb_internal i64      type_offset_of (Type *t, i32 index);
+gb_internal i64      type_offset_of (Type *t, i32 index, Type **field_type_=nullptr);
 gb_internal gbString type_to_string (Type *type, bool shorthand=true);
 gb_internal gbString type_to_string (Type *type, gbAllocator allocator, bool shorthand=true);
 gb_internal i64      type_size_of_internal(Type *t, TypePath *path);
@@ -3907,50 +3907,83 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) {
 	return build_context.ptr_size;
 }
 
-gb_internal i64 type_offset_of(Type *t, i32 index) {
+gb_internal i64 type_offset_of(Type *t, i32 index, Type **field_type_) {
 	t = base_type(t);
-	if (t->kind == Type_Struct) {
+	switch (t->kind) {
+	case Type_Struct:
 		type_set_offsets(t);
 		if (gb_is_between(index, 0, t->Struct.fields.count-1)) {
 			GB_ASSERT(t->Struct.offsets != nullptr);
+			if (field_type_) *field_type_ = t->Struct.fields[index]->type;
 			return t->Struct.offsets[index];
 		}
-	} else if (t->kind == Type_Tuple) {
+		break;
+	case Type_Tuple:
 		type_set_offsets(t);
 		if (gb_is_between(index, 0, t->Tuple.variables.count-1)) {
 			GB_ASSERT(t->Tuple.offsets != nullptr);
+			if (field_type_) *field_type_ = t->Tuple.variables[index]->type;
 			return t->Tuple.offsets[index];
 		}
-	}  else if (t->kind == Type_Basic) {
+		break;
+	case Type_Basic:
 		if (t->Basic.kind == Basic_string) {
 			switch (index) {
-			case 0: return 0;                      // data
-			case 1: return build_context.int_size; // len
+			case 0:
+				if (field_type_) *field_type_ = t_u8_ptr;
+				return 0;                      // data
+			case 1:
+				if (field_type_) *field_type_ = t_int;
+				return build_context.int_size; // len
 			}
 		} else if (t->Basic.kind == Basic_any) {
 			switch (index) {
-			case 0: return 0;                      // type_info
-			case 1: return build_context.ptr_size; // data
+			case 0:
+				if (field_type_) *field_type_ = t_rawptr;
+				return 0;                      // data
+			case 1:
+				if (field_type_) *field_type_ = t_typeid;
+				return build_context.ptr_size; // id
 			}
 		}
-	} else if (t->kind == Type_Slice) {
+		break;
+	case Type_Slice:
 		switch (index) {
-		case 0: return 0;                        // data
-		case 1: return 1*build_context.int_size; // len
-		case 2: return 2*build_context.int_size; // cap
+		case 0:
+			if (field_type_) *field_type_ = alloc_type_multi_pointer(t->Slice.elem);
+			return 0;                        // data
+		case 1:
+			if (field_type_) *field_type_ = t_int;
+			return 1*build_context.int_size; // len
 		}
-	} else if (t->kind == Type_DynamicArray) {
+		break;
+	case Type_DynamicArray:
 		switch (index) {
-		case 0: return 0;                        // data
-		case 1: return 1*build_context.int_size; // len
-		case 2: return 2*build_context.int_size; // cap
-		case 3: return 3*build_context.int_size; // allocator
+		case 0:
+			if (field_type_) *field_type_ = alloc_type_multi_pointer(t->DynamicArray.elem);
+			return 0;                        // data
+		case 1:
+			if (field_type_) *field_type_ = t_int;
+			return 1*build_context.int_size; // len
+		case 2:
+			if (field_type_) *field_type_ = t_int;
+			return 2*build_context.int_size; // cap
+		case 3:
+			if (field_type_) *field_type_ = t_allocator;
+			return 3*build_context.int_size; // allocator
 		}
-	} else if (t->kind == Type_Union) {
-		/* i64 s = */ type_size_of(t);
-		switch (index) {
-		case -1: return align_formula(t->Union.variant_block_size, build_context.ptr_size); // __type_info
+		break;
+	case Type_Union:
+		if (!is_type_union_maybe_pointer(t)) {
+			/* i64 s = */ type_size_of(t);
+			switch (index) {
+			case -1:
+				if (field_type_) *field_type_ = union_tag_type(t);
+				union_tag_size(t);
+				return t->Union.variant_block_size;
+			}
 		}
+		break;
 	}
 	GB_ASSERT(index == 0);
 	return 0;