浏览代码

Use a global to store the build context information

Ginger Bill 8 年之前
父节点
当前提交
bd27c24fab
共有 9 个文件被更改,包括 195 次插入和 220 次删除
  1. 8 21
      src/build_settings.c
  2. 2 2
      src/check_decl.c
  3. 22 24
      src/check_expr.c
  4. 14 10
      src/checker.c
  5. 46 48
      src/ir.c
  6. 19 15
      src/ir_print.c
  7. 4 5
      src/main.c
  8. 5 9
      src/parser.c
  9. 75 86
      src/types.c

+ 8 - 21
src/build.c → src/build_settings.c

@@ -13,6 +13,11 @@ typedef struct BuildContext {
 	bool   is_dll;
 } BuildContext;
 
+
+gb_global BuildContext build_context = {0};
+
+
+
 // TODO(bill): OS dependent versions for the BuildContext
 // join_path
 // is_dir
@@ -129,25 +134,6 @@ String odin_root_dir(void) {
 #error Implement system
 #endif
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
 
 #if defined(GB_SYSTEM_WINDOWS)
 String path_to_fullpath(gbAllocator a, String s) {
@@ -216,9 +202,10 @@ String get_fullpath_core(gbAllocator a, String path) {
 
 
 
-void init_build_context(BuildContext *bc) {
+void init_build_context(void) {
+	BuildContext *bc = &build_context;
 	bc->ODIN_VENDOR  = str_lit("odin");
-	bc->ODIN_VERSION = str_lit("0.1.0");
+	bc->ODIN_VERSION = str_lit("0.1.1");
 	bc->ODIN_ROOT    = odin_root_dir();
 
 #if defined(GB_SYSTEM_WINDOWS)

+ 2 - 2
src/check_decl.c

@@ -300,7 +300,7 @@ void check_proc_lit(Checker *c, Entity *e, DeclInfo *d) {
 	}
 
 	if (is_foreign) {
-		MapEntity *fp = &c->info.foreign_procs;
+		MapEntity *fp = &c->info.foreigns;
 		String name = e->token.string;
 		if (pd->foreign_name.len > 0) {
 			name = pd->foreign_name;
@@ -355,7 +355,7 @@ void check_proc_lit(Checker *c, Entity *e, DeclInfo *d) {
 		}
 
 		if (is_link_name || is_export) {
-			MapEntity *fp = &c->info.foreign_procs;
+			MapEntity *fp = &c->info.foreigns;
 
 			e->Procedure.link_name = name;
 

+ 22 - 24
src/check_expr.c

@@ -473,7 +473,6 @@ void check_fields(Checker *c, AstNode *node, AstNodeArray decls,
 
 // TODO(bill): Cleanup struct field reordering
 // TODO(bill): Inline sorting procedure?
-gb_global BaseTypeSizes __checker_sizes = {0};
 gb_global gbAllocator   __checker_allocator = {0};
 
 GB_COMPARE_PROC(cmp_struct_entity_size) {
@@ -487,10 +486,10 @@ GB_COMPARE_PROC(cmp_struct_entity_size) {
 	GB_ASSERT(y != NULL);
 	GB_ASSERT(x->kind == Entity_Variable);
 	GB_ASSERT(y->kind == Entity_Variable);
-	i64 xa = type_align_of(__checker_sizes, __checker_allocator, x->type);
-	i64 ya = type_align_of(__checker_sizes, __checker_allocator, y->type);
-	i64 xs = type_size_of(__checker_sizes, __checker_allocator, x->type);
-	i64 ys = type_size_of(__checker_sizes, __checker_allocator, y->type);
+	i64 xa = type_align_of(__checker_allocator, x->type);
+	i64 ya = type_align_of(__checker_allocator, y->type);
+	i64 xs = type_size_of(__checker_allocator, x->type);
+	i64 ys = type_size_of(__checker_allocator, y->type);
 
 	if (xa == ya) {
 		if (xs == ys) {
@@ -545,7 +544,6 @@ void check_struct_type(Checker *c, Type *struct_type, AstNode *node) {
 
 		// NOTE(bill): Hacky thing
 		// TODO(bill): Probably make an inline sorting procedure rather than use global variables
-		__checker_sizes = c->sizes;
 		__checker_allocator = c->allocator;
 		// NOTE(bill): compound literal order must match source not layout
 		gb_sort_array(reordered_fields, field_count, cmp_struct_entity_size);
@@ -557,7 +555,7 @@ void check_struct_type(Checker *c, Type *struct_type, AstNode *node) {
 		struct_type->Record.fields = reordered_fields;
 	}
 
-	type_set_offsets(c->sizes, c->allocator, struct_type);
+	type_set_offsets(c->allocator, struct_type);
 
 	if (st->align != NULL) {
 		if (st->is_packed) {
@@ -584,7 +582,7 @@ void check_struct_type(Checker *c, Type *struct_type, AstNode *node) {
 				}
 
 				// NOTE(bill): Success!!!
-				i64 custom_align = gb_clamp(align, 1, c->sizes.max_align);
+				i64 custom_align = gb_clamp(align, 1, build_context.max_align);
 				if (custom_align < align) {
 					warning_node(st->align, "Custom alignment has been clamped to %lld from %lld", align, custom_align);
 				}
@@ -1188,7 +1186,7 @@ void check_map_type(Checker *c, Type *type, AstNode *node) {
 		entry_type->Record.fields_in_src_order = fields;
 		entry_type->Record.field_count         = field_count;
 
-		type_set_offsets(c->sizes, a, entry_type);
+		type_set_offsets(a, entry_type);
 		type->Map.entry_type = entry_type;
 	}
 
@@ -1219,7 +1217,7 @@ void check_map_type(Checker *c, Type *type, AstNode *node) {
 		generated_struct_type->Record.fields_in_src_order = fields;
 		generated_struct_type->Record.field_count         = field_count;
 
-		type_set_offsets(c->sizes, a, generated_struct_type);
+		type_set_offsets(a, generated_struct_type);
 		type->Map.generated_struct_type = generated_struct_type;
 	}
 
@@ -1576,7 +1574,7 @@ bool check_representable_as_constant(Checker *c, ExactValue in_value, Type *type
 		if (out_value) *out_value = v;
 		i64 i = v.value_integer;
 		u64 u = *cast(u64 *)&i;
-		i64 s = 8*type_size_of(c->sizes, c->allocator, type);
+		i64 s = 8*type_size_of(c->allocator, type);
 		u64 umax = ~0ull;
 		if (s < 64) {
 			umax = (1ull << s) - 1ull;
@@ -1734,7 +1732,7 @@ void check_unary_expr(Checker *c, Operand *o, Token op, AstNode *node) {
 
 		i32 precision = 0;
 		if (is_type_unsigned(type)) {
-			precision = cast(i32)(8 * type_size_of(c->sizes, c->allocator, type));
+			precision = cast(i32)(8 * type_size_of(c->allocator, type));
 		}
 		o->value = exact_unary_operator_value(op.kind, o->value, precision);
 
@@ -2059,7 +2057,7 @@ Operand check_ptr_addition(Checker *c, TokenKind op, Operand *ptr, Operand *offs
 
 
 	if (ptr->mode == Addressing_Constant && offset->mode == Addressing_Constant) {
-		i64 elem_size = type_size_of(c->sizes, c->allocator, ptr->type);
+		i64 elem_size = type_size_of(c->allocator, ptr->type);
 		i64 ptr_val = ptr->value.value_pointer;
 		i64 offset_val = exact_value_to_integer(offset->value).value_integer;
 		i64 new_ptr_val = ptr_val;
@@ -2257,7 +2255,7 @@ void check_binary_expr(Checker *c, Operand *x, AstNode *node) {
 		if (is_type_pointer(type)) {
 			GB_ASSERT(op.kind == Token_Sub);
 			i64 bytes = a.value_pointer - b.value_pointer;
-			i64 diff = bytes/type_size_of(c->sizes, c->allocator, type);
+			i64 diff = bytes/type_size_of(c->allocator, type);
 			x->value = make_exact_value_pointer(diff);
 			return;
 		}
@@ -2941,7 +2939,7 @@ bool check_builtin_procedure(Checker *c, Operand *operand, AstNode *call, i32 id
 		}
 
 		operand->mode = Addressing_Constant;
-		operand->value = make_exact_value_integer(type_size_of(c->sizes, c->allocator, type));
+		operand->value = make_exact_value_integer(type_size_of(c->allocator, type));
 		operand->type = t_untyped_integer;
 
 	} break;
@@ -2954,7 +2952,7 @@ bool check_builtin_procedure(Checker *c, Operand *operand, AstNode *call, i32 id
 		}
 
 		operand->mode = Addressing_Constant;
-		operand->value = make_exact_value_integer(type_size_of(c->sizes, c->allocator, operand->type));
+		operand->value = make_exact_value_integer(type_size_of(c->allocator, operand->type));
 		operand->type = t_untyped_integer;
 		break;
 
@@ -2966,7 +2964,7 @@ bool check_builtin_procedure(Checker *c, Operand *operand, AstNode *call, i32 id
 			return false;
 		}
 		operand->mode = Addressing_Constant;
-		operand->value = make_exact_value_integer(type_align_of(c->sizes, c->allocator, type));
+		operand->value = make_exact_value_integer(type_align_of(c->allocator, type));
 		operand->type = t_untyped_integer;
 	} break;
 
@@ -2978,7 +2976,7 @@ bool check_builtin_procedure(Checker *c, Operand *operand, AstNode *call, i32 id
 		}
 
 		operand->mode = Addressing_Constant;
-		operand->value = make_exact_value_integer(type_align_of(c->sizes, c->allocator, operand->type));
+		operand->value = make_exact_value_integer(type_align_of(c->allocator, operand->type));
 		operand->type = t_untyped_integer;
 		break;
 
@@ -3022,7 +3020,7 @@ bool check_builtin_procedure(Checker *c, Operand *operand, AstNode *call, i32 id
 		}
 
 		operand->mode = Addressing_Constant;
-		operand->value = make_exact_value_integer(type_offset_of_from_selection(c->sizes, c->allocator, type, sel));
+		operand->value = make_exact_value_integer(type_offset_of_from_selection(c->allocator, type, sel));
 		operand->type  = t_untyped_integer;
 	} break;
 
@@ -3071,7 +3069,7 @@ bool check_builtin_procedure(Checker *c, Operand *operand, AstNode *call, i32 id
 
 		operand->mode = Addressing_Constant;
 		// IMPORTANT TODO(bill): Fix for anonymous fields
-		operand->value = make_exact_value_integer(type_offset_of_from_selection(c->sizes, c->allocator, type, sel));
+		operand->value = make_exact_value_integer(type_offset_of_from_selection(c->allocator, type, sel));
 		operand->type  = t_untyped_integer;
 	} break;
 
@@ -3303,7 +3301,7 @@ bool check_builtin_procedure(Checker *c, Operand *operand, AstNode *call, i32 id
 		if (operand->mode == Addressing_Constant &&
 		    op.mode == Addressing_Constant) {
 			i64 ptr = operand->value.value_pointer;
-			i64 elem_size = type_size_of(c->sizes, c->allocator, ptr_type->Pointer.elem);
+			i64 elem_size = type_size_of(c->allocator, ptr_type->Pointer.elem);
 			ptr += elem_size * op.value.value_integer;
 			operand->value.value_pointer = ptr;
 		} else {
@@ -3366,7 +3364,7 @@ bool check_builtin_procedure(Checker *c, Operand *operand, AstNode *call, i32 id
 		    op.mode == Addressing_Constant) {
 			u8 *ptr_a = cast(u8 *)operand->value.value_pointer;
 			u8 *ptr_b = cast(u8 *)op.value.value_pointer;
-			isize elem_size = type_size_of(c->sizes, c->allocator, ptr_type->Pointer.elem);
+			isize elem_size = type_size_of(c->allocator, ptr_type->Pointer.elem);
 			operand->value = make_exact_value_integer((ptr_a - ptr_b) / elem_size);
 		} else {
 			operand->mode = Addressing_Value;
@@ -4841,8 +4839,8 @@ ExprKind check__expr_base(Checker *c, Operand *o, AstNode *node, Type *type_hint
 				goto error;
 			}
 
-			i64 srcz = type_size_of(c->sizes, c->allocator, o->type);
-			i64 dstz = type_size_of(c->sizes, c->allocator, t);
+			i64 srcz = type_size_of(c->allocator, o->type);
+			i64 dstz = type_size_of(c->allocator, t);
 			if (srcz != dstz) {
 				gbString expr_str = expr_to_string(o->expr);
 				gbString type_str = type_to_string(t);

+ 14 - 10
src/checker.c

@@ -264,7 +264,7 @@ typedef struct CheckerInfo {
 	MapScope             scopes;          // Key: AstNode * | Node       -> Scope
 	MapExprInfo          untyped;         // Key: AstNode * | Expression -> ExprInfo
 	MapDeclInfo          entities;        // Key: Entity *
-	MapEntity            foreign_procs;   // Key: String
+	MapEntity            foreigns;        // Key: String
 	MapAstFile           files;           // Key: String (full path)
 	MapIsize             type_info_map;   // Key: Type *
 	isize                type_info_count;
@@ -275,13 +275,11 @@ typedef struct Checker {
 	CheckerInfo info;
 
 	AstFile *              curr_ast_file;
-	BaseTypeSizes          sizes;
 	Scope *                global_scope;
 	Array(ProcedureInfo)   procs; // NOTE(bill): Procedures to check
 	Array(DelayedDecl)     delayed_imports;
 	Array(DelayedDecl)     delayed_foreign_libraries;
 
-
 	gbArena                arena;
 	gbArena                tmp_arena;
 	gbAllocator            allocator;
@@ -591,7 +589,8 @@ void add_global_string_constant(gbAllocator a, String name, String value) {
 }
 
 
-void init_universal_scope(BuildContext *bc) {
+void init_universal_scope(void) {
+	BuildContext *bc = &build_context;
 	// NOTE(bill): No need to free these
 	gbAllocator a = heap_allocator();
 	universal_scope = make_scope(NULL, a);
@@ -650,7 +649,7 @@ void init_checker_info(CheckerInfo *i) {
 	map_scope_init(&i->scopes,         a);
 	map_decl_info_init(&i->entities,   a);
 	map_expr_info_init(&i->untyped,    a);
-	map_entity_init(&i->foreign_procs, a);
+	map_entity_init(&i->foreigns,      a);
 	map_isize_init(&i->type_info_map,  a);
 	map_ast_file_init(&i->files,       a);
 	i->type_info_count = 0;
@@ -664,7 +663,7 @@ void destroy_checker_info(CheckerInfo *i) {
 	map_scope_destroy(&i->scopes);
 	map_decl_info_destroy(&i->entities);
 	map_expr_info_destroy(&i->untyped);
-	map_entity_destroy(&i->foreign_procs);
+	map_entity_destroy(&i->foreigns);
 	map_isize_destroy(&i->type_info_map);
 	map_ast_file_destroy(&i->files);
 }
@@ -675,8 +674,6 @@ void init_checker(Checker *c, Parser *parser, BuildContext *bc) {
 
 	c->parser = parser;
 	init_checker_info(&c->info);
-	c->sizes.word_size = bc->word_size;
-	c->sizes.max_align = bc->max_align;
 
 	array_init(&c->proc_stack, a);
 	array_init(&c->procs, a);
@@ -1384,8 +1381,14 @@ void check_collect_entities(Checker *c, AstNodeArray nodes, bool is_file_scope)
 					di->entities = entities;
 					di->type_expr = vd->type;
 					di->init_expr = vd->values.e[0];
+
+
+					if (vd->flags & VarDeclFlag_thread_local) {
+						error_node(decl, "#thread_local variable declarations cannot have initialization values");
+					}
 				}
 
+
 				for_array(i, vd->names) {
 					AstNode *name = vd->names.e[i];
 					AstNode *value = NULL;
@@ -1397,8 +1400,9 @@ void check_collect_entities(Checker *c, AstNodeArray nodes, bool is_file_scope)
 						continue;
 					}
 					Entity *e = make_entity_variable(c->allocator, c->context.scope, name->Ident, NULL, vd->flags & VarDeclFlag_immutable);
-					e->Variable.is_thread_local = vd->flags & VarDeclFlag_thread_local;
+					e->Variable.is_thread_local = (vd->flags & VarDeclFlag_thread_local) != 0;
 					e->identifier = name;
+
 					if (vd->flags & VarDeclFlag_using) {
 						vd->flags &= ~VarDeclFlag_using; // NOTE(bill): This error will be only caught once
 						error_node(name, "`using` is not allowed at the file scope");
@@ -1883,7 +1887,7 @@ void check_parsed_files(Checker *c) {
 		if (e->kind == Entity_TypeName) {
 			if (e->type != NULL) {
 				// i64 size  = type_size_of(c->sizes, c->allocator, e->type);
-				i64 align = type_align_of(c->sizes, c->allocator, e->type);
+				i64 align = type_align_of(c->allocator, e->type);
 			}
 		}
 	}

+ 46 - 48
src/ir.c

@@ -17,8 +17,6 @@ typedef Array(irValue *) irValueArray;
 
 typedef struct irModule {
 	CheckerInfo * info;
-	BuildContext *build_context;
-	BaseTypeSizes sizes;
 	gbArena       arena;
 	gbArena       tmp_arena;
 	gbAllocator   allocator;
@@ -326,6 +324,7 @@ typedef struct irValueGlobal {
 	bool          is_constant;
 	bool          is_private;
 	bool          is_thread_local;
+	bool          is_foreign;
 	bool          is_unnamed_addr;
 } irValueGlobal;
 
@@ -1438,9 +1437,9 @@ irValue *ir_gen_map_header(irProcedure *proc, irValue *map_val, Type *map_type)
 		ir_emit_store(proc, ir_emit_struct_ep(proc, h, 1), v_true);
 	}
 
-	i64 entry_size = type_size_of(proc->module->sizes, a, map_type->Map.entry_type);
-	i64 entry_align = type_align_of(proc->module->sizes, a, map_type->Map.entry_type);
-	i64 value_offset = type_offset_of(proc->module->sizes, a, map_type->Map.entry_type, 2);
+	i64 entry_size = type_size_of(a, map_type->Map.entry_type);
+	i64 entry_align = type_align_of(a, map_type->Map.entry_type);
+	i64 value_offset = type_offset_of(a, map_type->Map.entry_type, 2);
 	ir_emit_store(proc, ir_emit_struct_ep(proc, h, 2), ir_make_const_int(a, entry_size));
 	ir_emit_store(proc, ir_emit_struct_ep(proc, h, 3), ir_make_const_int(a, entry_align));
 	ir_emit_store(proc, ir_emit_struct_ep(proc, h, 4), ir_make_const_int(a, value_offset));
@@ -1460,7 +1459,7 @@ irValue *ir_gen_map_key(irProcedure *proc, irValue *key, Type *key_type) {
 		ir_emit_store(proc, ir_emit_struct_ep(proc, v, 0), ir_emit_conv(proc, p, t_u64));
 	} else if (is_type_float(t)) {
 		irValue *bits = NULL;
-		i64 size = type_size_of(proc->module->sizes, proc->module->allocator, t);
+		i64 size = type_size_of(proc->module->allocator, t);
 		switch (8*size) {
 		case 32: bits = ir_emit_bitcast(proc, key, t_u32); break;
 		case 64: bits = ir_emit_bitcast(proc, key, t_u64); break;
@@ -1660,7 +1659,7 @@ irValue *ir_emit_arith(irProcedure *proc, TokenKind op, irValue *left, irValue *
 			irModule *m = proc->module;
 			Type *ptr_type = base_type(t_left);
 			GB_ASSERT(!is_type_rawptr(ptr_type));
-			irValue *elem_size = ir_make_const_int(m->allocator, type_size_of(m->sizes, m->allocator, ptr_type->Pointer.elem));
+			irValue *elem_size = ir_make_const_int(m->allocator, type_size_of(m->allocator, ptr_type->Pointer.elem));
 			irValue *x = ir_emit_conv(proc, left, type);
 			irValue *y = ir_emit_conv(proc, right, type);
 			irValue *diff = ir_emit_arith(proc, op, x, y, type);
@@ -1675,7 +1674,7 @@ irValue *ir_emit_arith(irProcedure *proc, TokenKind op, irValue *left, irValue *
 		left = ir_emit_conv(proc, left, type);
 		if (!is_type_unsigned(ir_type(right))) {
 			Type *t = t_u64;
-			if (proc->module->sizes.word_size == 32) {
+			if (build_context.word_size == 32) {
 				t = t_u32;
 			}
 			right = ir_emit_conv(proc, right, t);
@@ -2189,8 +2188,8 @@ irValue *ir_emit_conv(irProcedure *proc, irValue *value, Type *t) {
 	if (is_type_integer(src) && is_type_integer(dst)) {
 		GB_ASSERT(src->kind == Type_Basic &&
 		          dst->kind == Type_Basic);
-		i64 sz = type_size_of(proc->module->sizes, proc->module->allocator, src);
-		i64 dz = type_size_of(proc->module->sizes, proc->module->allocator, dst);
+		i64 sz = type_size_of(proc->module->allocator, src);
+		i64 dz = type_size_of(proc->module->allocator, dst);
 		irConvKind kind = irConv_trunc;
 		if (sz == dz) {
 			// NOTE(bill): In LLVM, all integers are signed and rely upon 2's compliment
@@ -2216,8 +2215,8 @@ irValue *ir_emit_conv(irProcedure *proc, irValue *value, Type *t) {
 
 	// float -> float
 	if (is_type_float(src) && is_type_float(dst)) {
-		i64 sz = type_size_of(proc->module->sizes, proc->module->allocator, src);
-		i64 dz = type_size_of(proc->module->sizes, proc->module->allocator, dst);
+		i64 sz = type_size_of(proc->module->allocator, src);
+		i64 dz = type_size_of(proc->module->allocator, dst);
 		irConvKind kind = irConv_fptrunc;
 		if (dz >= sz) {
 			kind = irConv_fpext;
@@ -2432,8 +2431,8 @@ irValue *ir_emit_transmute(irProcedure *proc, irValue *value, Type *t) {
 
 	irModule *m = proc->module;
 
-	i64 sz = type_size_of(m->sizes, m->allocator, src);
-	i64 dz = type_size_of(m->sizes, m->allocator, dst);
+	i64 sz = type_size_of(m->allocator, src);
+	i64 dz = type_size_of(m->allocator, dst);
 
 	GB_ASSERT_MSG(sz == dz, "Invalid transmute conversion: `%s` to `%s`", type_to_string(src_type), type_to_string(t));
 
@@ -2457,7 +2456,7 @@ irValue *ir_emit_down_cast(irProcedure *proc, irValue *value, Type *t) {
 	Selection sel = lookup_field(proc->module->allocator, t, field_name, false);
 	irValue *bytes = ir_emit_conv(proc, value, t_u8_ptr);
 
-	i64 offset_ = type_offset_of_from_selection(proc->module->sizes, allocator, type_deref(t), sel);
+	i64 offset_ = type_offset_of_from_selection(allocator, type_deref(t), sel);
 	irValue *offset = ir_make_const_int(allocator, -offset_);
 	irValue *head = ir_emit_ptr_offset(proc, bytes, offset);
 	return ir_emit_conv(proc, head, t);
@@ -3072,8 +3071,8 @@ irValue *ir_build_single_expr(irProcedure *proc, AstNode *expr, TypeAndValue *tv
 					Type *type = type_of_expr(proc->module->info, ce->args.e[0]);
 					Type *ptr_type = make_type_pointer(allocator, type);
 
-					i64 s = type_size_of(proc->module->sizes, allocator, type);
-					i64 a = type_align_of(proc->module->sizes, allocator, type);
+					i64 s = type_size_of(allocator, type);
+					i64 a = type_align_of(allocator, type);
 
 					irValue **args = gb_alloc_array(allocator, irValue *, 2);
 					args[0] = ir_make_const_int(allocator, s);
@@ -3092,8 +3091,8 @@ irValue *ir_build_single_expr(irProcedure *proc, AstNode *expr, TypeAndValue *tv
 					Type *ptr_type = make_type_pointer(allocator, type);
 					Type *slice_type = make_type_slice(allocator, type);
 
-					i64 s = type_size_of(proc->module->sizes, allocator, type);
-					i64 a = type_align_of(proc->module->sizes, allocator, type);
+					i64 s = type_size_of(allocator, type);
+					i64 a = type_align_of(allocator, type);
 
 					irValue *elem_size  = ir_make_const_int(allocator, s);
 					irValue *elem_align = ir_make_const_int(allocator, a);
@@ -3207,8 +3206,8 @@ irValue *ir_build_single_expr(irProcedure *proc, AstNode *expr, TypeAndValue *tv
 					if (is_type_dynamic_array(type)) {
 						Type *elem = type->DynamicArray.elem;
 
-						irValue *elem_size  = ir_make_const_int(a, type_size_of(proc->module->sizes, a, elem));
-						irValue *elem_align = ir_make_const_int(a, type_align_of(proc->module->sizes, a, elem));
+						irValue *elem_size  = ir_make_const_int(a, type_size_of(a, elem));
+						irValue *elem_align = ir_make_const_int(a, type_align_of(a, elem));
 
 						ptr = ir_emit_conv(proc, ptr, t_rawptr);
 
@@ -3257,8 +3256,8 @@ irValue *ir_build_single_expr(irProcedure *proc, AstNode *expr, TypeAndValue *tv
 					GB_ASSERT(is_type_dynamic_array(type));
 					Type *elem_type = type->DynamicArray.elem;
 
-					irValue *elem_size  = ir_make_const_int(a, type_size_of(proc->module->sizes, a, elem_type));
-					irValue *elem_align = ir_make_const_int(a, type_align_of(proc->module->sizes, a, elem_type));
+					irValue *elem_size  = ir_make_const_int(a, type_size_of(a, elem_type));
+					irValue *elem_align = ir_make_const_int(a, type_align_of(a, elem_type));
 
 					array_ptr = ir_emit_conv(proc, array_ptr, t_rawptr);
 
@@ -3401,7 +3400,7 @@ irValue *ir_build_single_expr(irProcedure *proc, AstNode *expr, TypeAndValue *tv
 					Type *slice_type = base_type(ir_type(dst_slice));
 					GB_ASSERT(slice_type->kind == Type_Slice);
 					Type *elem_type = slice_type->Slice.elem;
-					i64 size_of_elem = type_size_of(proc->module->sizes, proc->module->allocator, elem_type);
+					i64 size_of_elem = type_size_of(proc->module->allocator, elem_type);
 
 					irValue *dst = ir_emit_conv(proc, ir_slice_elem(proc, dst_slice), t_rawptr);
 					irValue *src = ir_emit_conv(proc, ir_slice_elem(proc, src_slice), t_rawptr);
@@ -3473,7 +3472,7 @@ irValue *ir_build_single_expr(irProcedure *proc, AstNode *expr, TypeAndValue *tv
 						return ir_emit_conv(proc, s, tv->type);
 					}
 					irValue *slice = ir_add_local_generated(proc, tv->type);
-					i64 elem_size = type_size_of(proc->module->sizes, proc->module->allocator, t->Slice.elem);
+					i64 elem_size = type_size_of(proc->module->allocator, t->Slice.elem);
 
 					irValue *ptr   = ir_emit_conv(proc, ir_slice_elem(proc, s), t_u8_ptr);
 					irValue *count = ir_slice_count(proc, s);
@@ -4194,8 +4193,8 @@ irAddr ir_build_addr(irProcedure *proc, AstNode *expr) {
 			}
 			Type *elem = bt->DynamicArray.elem;
 			gbAllocator a = proc->module->allocator;
-			irValue *size  = ir_make_const_int(a, type_size_of(proc->module->sizes, a, elem));
-			irValue *align = ir_make_const_int(a, type_align_of(proc->module->sizes, a, elem));
+			irValue *size  = ir_make_const_int(a, type_size_of(a, elem));
+			irValue *align = ir_make_const_int(a, type_align_of(a, elem));
 			{
 				irValue **args = gb_alloc_array(a, irValue *, 4);
 				args[0] = ir_emit_conv(proc, v, t_rawptr);
@@ -5670,7 +5669,7 @@ void ir_module_add_value(irModule *m, Entity *e, irValue *v) {
 	map_ir_value_set(&m->values, hash_pointer(e), v);
 }
 
-void ir_init_module(irModule *m, Checker *c, BuildContext *build_context) {
+void ir_init_module(irModule *m, Checker *c) {
 	// TODO(bill): Determine a decent size for the arena
 	isize token_count = c->parser->total_token_count;
 	isize arena_size = 4 * token_count * gb_size_of(irValue);
@@ -5679,8 +5678,6 @@ void ir_init_module(irModule *m, Checker *c, BuildContext *build_context) {
 	m->allocator     = gb_arena_allocator(&m->arena);
 	m->tmp_allocator = gb_arena_allocator(&m->tmp_arena);
 	m->info = &c->info;
-	m->sizes = c->sizes;
-	m->build_context = build_context;
 
 	map_ir_value_init(&m->values,  heap_allocator());
 	map_ir_value_init(&m->members, heap_allocator());
@@ -5785,7 +5782,7 @@ void ir_destroy_module(irModule *m) {
 ////////////////////////////////////////////////////////////////
 
 
-bool ir_gen_init(irGen *s, Checker *c, BuildContext *build_context) {
+bool ir_gen_init(irGen *s, Checker *c) {
 	if (global_error_collector.count != 0) {
 		return false;
 	}
@@ -5795,7 +5792,7 @@ bool ir_gen_init(irGen *s, Checker *c, BuildContext *build_context) {
 		return false;
 	}
 
-	ir_init_module(&s->module, c, build_context);
+	ir_init_module(&s->module, c);
 	s->module.generate_debug_info = false;
 
 	// TODO(bill): generate appropriate output name
@@ -5974,6 +5971,7 @@ void ir_gen_tree(irGen *s) {
 		if (!scope->is_global) {
 			if (e->kind == Entity_Procedure && (e->Procedure.tags & ProcTag_export) != 0) {
 			} else if (e->kind == Entity_Procedure && e->Procedure.link_name.len > 0) {
+				// Handle later
 			} else if (scope->is_init && e->kind == Entity_Procedure && str_eq(name, str_lit("main"))) {
 			} else {
 				name = ir_mangle_name(s, e->token.pos.file, e);
@@ -6079,7 +6077,7 @@ void ir_gen_tree(irGen *s) {
 	}
 
 #if defined(GB_SYSTEM_WINDOWS)
-	if (m->build_context->is_dll && !has_dll_main) {
+	if (build_context.is_dll && !has_dll_main) {
 		// DllMain :: proc(inst: rawptr, reason: u32, reserved: rawptr) -> i32
 		String name = str_lit("DllMain");
 		Type *proc_params = make_type_tuple(a);
@@ -6306,7 +6304,7 @@ void ir_gen_tree(irGen *s) {
 					case Basic_uint: {
 						tag = ir_emit_conv(proc, ti_ptr, t_type_info_integer_ptr);
 						bool is_unsigned = (t->Basic.flags & BasicFlag_Unsigned) != 0;
-						irValue *bits = ir_make_const_int(a, type_size_of(m->sizes, a, t));
+						irValue *bits = ir_make_const_int(a, type_size_of(a, t));
 						irValue *is_signed = ir_make_const_bool(a, !is_unsigned);
 						ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 0), bits);
 						ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 1), is_signed);
@@ -6318,7 +6316,7 @@ void ir_gen_tree(irGen *s) {
 					// case Basic_f128:
 					{
 						tag = ir_emit_conv(proc, ti_ptr, t_type_info_float_ptr);
-						irValue *bits = ir_make_const_int(a, type_size_of(m->sizes, a, t));
+						irValue *bits = ir_make_const_int(a, type_size_of(a, t));
 						ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 0), bits);
 					} break;
 
@@ -6348,7 +6346,7 @@ void ir_gen_tree(irGen *s) {
 					irValue *gep = ir_get_type_info_ptr(proc, type_info_data, t->Array.elem);
 					ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 0), gep);
 
-					isize ez = type_size_of(m->sizes, a, t->Array.elem);
+					isize ez = type_size_of(a, t->Array.elem);
 					irValue *elem_size = ir_emit_struct_ep(proc, tag, 1);
 					ir_emit_store(proc, elem_size, ir_make_const_int(a, ez));
 
@@ -6362,7 +6360,7 @@ void ir_gen_tree(irGen *s) {
 					irValue *gep = ir_get_type_info_ptr(proc, type_info_data, t->DynamicArray.elem);
 					ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 0), gep);
 
-					isize ez = type_size_of(m->sizes, a, t->DynamicArray.elem);
+					isize ez = type_size_of(a, t->DynamicArray.elem);
 					irValue *elem_size = ir_emit_struct_ep(proc, tag, 1);
 					ir_emit_store(proc, elem_size, ir_make_const_int(a, ez));
 				} break;
@@ -6372,7 +6370,7 @@ void ir_gen_tree(irGen *s) {
 					irValue *gep = ir_get_type_info_ptr(proc, type_info_data, t->Slice.elem);
 					ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 0), gep);
 
-					isize ez = type_size_of(m->sizes, a, t->Slice.elem);
+					isize ez = type_size_of(a, t->Slice.elem);
 					irValue *elem_size = ir_emit_struct_ep(proc, tag, 1);
 					ir_emit_store(proc, elem_size, ir_make_const_int(a, ez));
 				} break;
@@ -6382,10 +6380,10 @@ void ir_gen_tree(irGen *s) {
 					irValue *gep = ir_get_type_info_ptr(proc, type_info_data, t->Vector.elem);
 					ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 0), gep);
 
-					isize ez = type_size_of(m->sizes, a, t->Vector.elem);
+					isize ez = type_size_of(a, t->Vector.elem);
 					ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 1), ir_make_const_int(a, ez));
 					ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 2), ir_make_const_int(a, t->Vector.count));
-					ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 3), ir_make_const_int(a, type_align_of(m->sizes, a, t)));
+					ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 3), ir_make_const_int(a, type_align_of(a, t)));
 
 				} break;
 				case Type_Proc: {
@@ -6413,7 +6411,7 @@ void ir_gen_tree(irGen *s) {
 					tag = ir_emit_conv(proc, ti_ptr, t_type_info_tuple_ptr);
 
 					{
-						irValue *align = ir_make_const_int(a, type_align_of(m->sizes, a, t));
+						irValue *align = ir_make_const_int(a, type_align_of(a, t));
 						ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 4), align);
 					}
 
@@ -6444,8 +6442,8 @@ void ir_gen_tree(irGen *s) {
 						tag = ir_emit_conv(proc, ti_ptr, t_type_info_struct_ptr);
 
 						{
-							irValue *size         = ir_make_const_int(a,  type_size_of(m->sizes, a, t));
-							irValue *align        = ir_make_const_int(a,  type_align_of(m->sizes, a, t));
+							irValue *size         = ir_make_const_int(a,  type_size_of(a, t));
+							irValue *align        = ir_make_const_int(a,  type_align_of(a, t));
 							irValue *packed       = ir_make_const_bool(a, t->Record.struct_is_packed);
 							irValue *ordered      = ir_make_const_bool(a, t->Record.struct_is_ordered);
 							irValue *custom_align = ir_make_const_bool(a, t->Record.custom_align);
@@ -6460,7 +6458,7 @@ void ir_gen_tree(irGen *s) {
 						irValue *memory_names   = ir_type_info_member_offset(proc, type_info_member_names,   t->Record.field_count, &type_info_member_names_index);
 						irValue *memory_offsets = ir_type_info_member_offset(proc, type_info_member_offsets, t->Record.field_count, &type_info_member_offsets_index);
 
-						type_set_offsets(m->sizes, a, t); // NOTE(bill): Just incase the offsets have not been set yet
+						type_set_offsets(a, t); // NOTE(bill): Just incase the offsets have not been set yet
 						for (isize source_index = 0; source_index < t->Record.field_count; source_index++) {
 							// TODO(bill): Order fields in source order not layout order
 							Entity *f = t->Record.fields_in_src_order[source_index];
@@ -6488,8 +6486,8 @@ void ir_gen_tree(irGen *s) {
 						ir_emit_comment(proc, str_lit("Type_Info_Union"));
 						tag = ir_emit_conv(proc, ti_ptr, t_type_info_union_ptr);
 						{
-							irValue *size    = ir_make_const_int(a, type_size_of(m->sizes, a, t));
-							irValue *align   = ir_make_const_int(a, type_align_of(m->sizes, a, t));
+							irValue *size    = ir_make_const_int(a, type_size_of(a, t));
+							irValue *align   = ir_make_const_int(a, type_align_of(a, t));
 							ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 3),  size);
 							ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 4),  align);
 						}
@@ -6498,8 +6496,8 @@ void ir_gen_tree(irGen *s) {
 						ir_emit_comment(proc, str_lit("Type_Info_RawUnion"));
 						tag = ir_emit_conv(proc, ti_ptr, t_type_info_raw_union_ptr);
 						{
-							irValue *size    = ir_make_const_int(a, type_size_of(m->sizes, a, t));
-							irValue *align   = ir_make_const_int(a, type_align_of(m->sizes, a, t));
+							irValue *size    = ir_make_const_int(a, type_size_of(a, t));
+							irValue *align   = ir_make_const_int(a, type_align_of(a, t));
 							ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 3),  size);
 							ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 4),  align);
 						}

+ 19 - 15
src/ir_print.c

@@ -137,8 +137,7 @@ void ir_print_encoded_global(irFileBuffer *f, String name, bool remove_prefix) {
 
 
 void ir_print_type(irFileBuffer *f, irModule *m, Type *t) {
-	BaseTypeSizes s = m->sizes;
-	i64 word_bits = 8*s.word_size;
+	i64 word_bits = 8*build_context.word_size;
 	GB_ASSERT_NOT_NULL(t);
 	t = default_type(t);
 	GB_ASSERT(is_type_typed(t));
@@ -178,7 +177,7 @@ void ir_print_type(irFileBuffer *f, irModule *m, Type *t) {
 		ir_fprintf(f, "]");
 		return;
 	case Type_Vector: {
-		i64 align = type_align_of(s, heap_allocator(), t);
+		i64 align = type_align_of(heap_allocator(), t);
 		i64 count = t->Vector.count;
 		ir_fprintf(f, "{[0 x <%lld x i8>], [%lld x ", align, count);
 		ir_print_type(f, m, t->Vector.elem);
@@ -228,15 +227,15 @@ void ir_print_type(irFileBuffer *f, irModule *m, Type *t) {
 		case TypeRecord_Union: {
 			// NOTE(bill): The zero size array is used to fix the alignment used in a structure as
 			// LLVM takes the first element's alignment as the entire alignment (like C)
-			i64 size_of_union  = type_size_of(s, heap_allocator(), t) - s.word_size;
-			i64 align_of_union = type_align_of(s, heap_allocator(), t);
+			i64 size_of_union  = type_size_of(heap_allocator(), t) - build_context.word_size;
+			i64 align_of_union = type_align_of(heap_allocator(), t);
 			ir_fprintf(f, "{[0 x <%lld x i8>], [%lld x i8], i%lld}", align_of_union, size_of_union, word_bits);
 		} return;
 		case TypeRecord_RawUnion: {
 			// NOTE(bill): The zero size array is used to fix the alignment used in a structure as
 			// LLVM takes the first element's alignment as the entire alignment (like C)
-			i64 size_of_union  = type_size_of(s, heap_allocator(), t);
-			i64 align_of_union = type_align_of(s, heap_allocator(), t);
+			i64 size_of_union  = type_size_of(heap_allocator(), t);
+			i64 align_of_union = type_align_of(heap_allocator(), t);
 			ir_fprintf(f, "{[0 x <%lld x i8>], [%lld x i8]}", align_of_union, size_of_union);
 		} return;
 		case TypeRecord_Enum:
@@ -455,7 +454,7 @@ void ir_print_exact_value(irFileBuffer *f, irModule *m, ExactValue value, Type *
 				break;
 			}
 
-			i64 align = type_align_of(m->sizes, m->allocator, type);
+			i64 align = type_align_of(m->allocator, type);
 			i64 count = type->Vector.count;
 			Type *elem_type = type->Vector.elem;
 
@@ -681,7 +680,7 @@ void ir_print_instr(irFileBuffer *f, irModule *m, irValue *value) {
 		Type *type = instr->Local.entity->type;
 		ir_fprintf(f, "%%%d = alloca ", value->index);
 		ir_print_type(f, m, type);
-		ir_fprintf(f, ", align %lld\n", type_align_of(m->sizes, m->allocator, type));
+		ir_fprintf(f, ", align %lld\n", type_align_of(m->allocator, type));
 	} break;
 
 	case irInstr_ZeroInit: {
@@ -714,7 +713,7 @@ void ir_print_instr(irFileBuffer *f, irModule *m, irValue *value) {
 		ir_print_type(f, m, type);
 		ir_fprintf(f, "* ");
 		ir_print_value(f, m, instr->Load.address, type);
-		ir_fprintf(f, ", align %lld\n", type_align_of(m->sizes, m->allocator, type));
+		ir_fprintf(f, ", align %lld\n", type_align_of(m->allocator, type));
 	} break;
 
 	case irInstr_ArrayElementPtr: {
@@ -1273,7 +1272,7 @@ void ir_print_proc(irFileBuffer *f, irModule *m, irProcedure *proc) {
 	} else {
 		ir_fprintf(f, "\n");
 		ir_fprintf(f, "define ");
-		if (m->build_context->is_dll) {
+		if (build_context.is_dll) {
 			// if (proc->tags & (ProcTag_export|ProcTag_dll_export)) {
 			if (proc->tags & (ProcTag_export)) {
 				ir_fprintf(f, "dllexport ");
@@ -1449,6 +1448,9 @@ void print_llvm_ir(irGen *ir) {
 		}
 		ir_print_encoded_global(f, g->entity->token.string, in_global_scope);
 		ir_fprintf(f, " = ");
+		if (g->is_foreign) {
+			ir_fprintf(f, "external ");
+		}
 		if (g->is_thread_local) {
 			ir_fprintf(f, "thread_local ");
 		}
@@ -1468,10 +1470,12 @@ void print_llvm_ir(irGen *ir) {
 
 		ir_print_type(f, m, g->entity->type);
 		ir_fprintf(f, " ");
-		if (g->value != NULL) {
-			ir_print_value(f, m, g->value, g->entity->type);
-		} else {
-			ir_fprintf(f, "zeroinitializer");
+		if (!g->is_foreign) {
+			if (g->value != NULL) {
+				ir_print_value(f, m, g->value, g->entity->type);
+			} else {
+				ir_fprintf(f, "zeroinitializer");
+			}
 		}
 		ir_fprintf(f, "\n");
 	}

+ 4 - 5
src/main.c

@@ -4,7 +4,7 @@ extern "C" {
 
 #include "common.c"
 #include "timings.c"
-#include "build.c"
+#include "build_settings.c"
 #include "tokenizer.c"
 #include "parser.c"
 // #include "printer.c"
@@ -145,10 +145,9 @@ int main(int argc, char **argv) {
 
 #if 1
 
-	BuildContext build_context = {0};
-	init_build_context(&build_context);
+	init_build_context();
 
-	init_universal_scope(&build_context);
+	init_universal_scope();
 
 	char *init_filename = NULL;
 	bool run_output = false;
@@ -222,7 +221,7 @@ int main(int argc, char **argv) {
 #if 1
 
 	irGen ir_gen = {0};
-	if (!ir_gen_init(&ir_gen, &checker, &build_context)) {
+	if (!ir_gen_init(&ir_gen, &checker)) {
 		return 1;
 	}
 	// defer (ssa_gen_destroy(&ir_gen));

+ 5 - 9
src/parser.c

@@ -84,9 +84,9 @@ typedef enum ProcCallingConvention {
 } ProcCallingConvention;
 
 typedef enum VarDeclFlag {
-	VarDeclFlag_thread_local = 1<<0,
-	VarDeclFlag_using        = 1<<1,
-	VarDeclFlag_immutable    = 1<<2,
+	VarDeclFlag_using            = 1<<0,
+	VarDeclFlag_immutable        = 1<<1,
+	VarDeclFlag_thread_local     = 1<<2,
 } VarDeclFlag;
 
 typedef enum StmtStateFlag {
@@ -493,15 +493,11 @@ Token ast_node_token(AstNode *node) {
 	case AstNode_ImportDecl:     return node->ImportDecl.token;
 	case AstNode_ForeignLibrary: return node->ForeignLibrary.token;
 
-
-	case AstNode_Field: {
+	case AstNode_Field:
 		if (node->Field.names.count > 0) {
 			return ast_node_token(node->Field.names.e[0]);
-		} else {
-			return ast_node_token(node->Field.type);
 		}
-	}
-
+		return ast_node_token(node->Field.type);
 
 	case AstNode_HelperType:       return node->HelperType.token;
 	case AstNode_ProcType:         return node->ProcType.token;

+ 75 - 86
src/types.c

@@ -178,19 +178,6 @@ typedef struct Type {
 	bool failure;
 } Type;
 
-// NOTE(bill): Internal sizes of certain types
-// string: 2*word_size  (ptr+len)
-// slice:  3*word_size  (ptr+len+cap)
-// array:  count*size_of(elem) aligned
-
-// NOTE(bill): Alignment of structures and other types are to be compatible with C
-
-typedef struct BaseTypeSizes {
-	i64 word_size;
-	i64 max_align;
-} BaseTypeSizes;
-
-
 typedef Array(i32) Array_i32;
 
 typedef struct Selection {
@@ -350,6 +337,10 @@ gb_global Type *t_map_header            = NULL;
 
 
 
+i64 type_size_of  (gbAllocator allocator, Type *t);
+i64 type_align_of (gbAllocator allocator, Type *t);
+i64 type_offset_of(gbAllocator allocator, Type *t, i32 index);
+
 
 
 gbString type_to_string(Type *type);
@@ -972,6 +963,8 @@ bool is_type_cte_safe(Type *type) {
 
 	case Type_DynamicArray:
 		return false;
+	case Type_Map:
+		return false;
 
 	case Type_Vector: // NOTE(bill): This should always to be true but this is for sanity reasons
 		return is_type_cte_safe(type->Vector.elem);
@@ -1463,12 +1456,8 @@ void type_path_pop(TypePath *tp) {
 #define FAILURE_ALIGNMENT 0
 
 
-i64 type_size_of(BaseTypeSizes s, gbAllocator allocator, Type *t);
-i64 type_align_of(BaseTypeSizes s, gbAllocator allocator, Type *t);
-i64 type_offset_of(BaseTypeSizes s, gbAllocator allocator, Type *t, i32 index);
-
-i64 type_size_of_internal (BaseTypeSizes s, gbAllocator allocator, Type *t, TypePath *path);
-i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypePath *path);
+i64 type_size_of_internal (gbAllocator allocator, Type *t, TypePath *path);
+i64 type_align_of_internal(gbAllocator allocator, Type *t, TypePath *path);
 
 i64 align_formula(i64 size, i64 align) {
 	if (align > 0) {
@@ -1478,32 +1467,32 @@ i64 align_formula(i64 size, i64 align) {
 	return size;
 }
 
-i64 type_size_of(BaseTypeSizes s, gbAllocator allocator, Type *t) {
+i64 type_size_of(gbAllocator allocator, Type *t) {
 	if (t == NULL) {
 		return 0;
 	}
 	i64 size;
 	TypePath path = {0};
 	type_path_init(&path);
-	size = type_size_of_internal(s, allocator, t, &path);
+	size = type_size_of_internal(allocator, t, &path);
 	type_path_free(&path);
 	return size;
 }
 
-i64 type_align_of(BaseTypeSizes s, gbAllocator allocator, Type *t) {
+i64 type_align_of(gbAllocator allocator, Type *t) {
 	if (t == NULL) {
 		return 1;
 	}
 	i64 align;
 	TypePath path = {0};
 	type_path_init(&path);
-	align = type_align_of_internal(s, allocator, t, &path);
+	align = type_align_of_internal(allocator, t, &path);
 	type_path_free(&path);
 	return align;
 }
 
 
-i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypePath *path) {
+i64 type_align_of_internal(gbAllocator allocator, Type *t, TypePath *path) {
 	if (t->failure) {
 		return FAILURE_ALIGNMENT;
 	}
@@ -1513,11 +1502,11 @@ i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, Type
 	case Type_Basic: {
 		GB_ASSERT(is_type_typed(t));
 		switch (t->kind) {
-		case Basic_string: return s.word_size;
-		case Basic_any:    return s.word_size;
+		case Basic_string: return build_context.word_size;
+		case Basic_any:    return build_context.word_size;
 
 		case Basic_int: case Basic_uint: case Basic_rawptr:
-			return s.word_size;
+			return build_context.word_size;
 		}
 	} break;
 
@@ -1527,17 +1516,17 @@ i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, Type
 		if (path->failure) {
 			return FAILURE_ALIGNMENT;
 		}
-		i64 align = type_align_of_internal(s, allocator, t->Array.elem, path);
+		i64 align = type_align_of_internal(allocator, t->Array.elem, path);
 		type_path_pop(path);
 		return align;
 	}
 
 	case Type_DynamicArray:
 		// data, count, capacity, allocator
-		return s.word_size;
+		return build_context.word_size;
 
 	case Type_Slice:
-		return s.word_size;
+		return build_context.word_size;
 
 	case Type_Vector: {
 		Type *elem = t->Vector.elem;
@@ -1545,17 +1534,17 @@ i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, Type
 		if (path->failure) {
 			return FAILURE_ALIGNMENT;
 		}
-		i64 size = type_size_of_internal(s, allocator, t->Vector.elem, path);
+		i64 size = type_size_of_internal(allocator, t->Vector.elem, path);
 		type_path_pop(path);
 		i64 count = gb_max(prev_pow2(t->Vector.count), 1);
 		i64 total = size * count;
-		return gb_clamp(total, 1, s.max_align);
+		return gb_clamp(total, 1, build_context.max_align);
 	} break;
 
 	case Type_Tuple: {
 		i64 max = 1;
 		for (isize i = 0; i < t->Tuple.variable_count; i++) {
-			i64 align = type_align_of_internal(s, allocator, t->Tuple.variables[i]->type, path);
+			i64 align = type_align_of_internal(allocator, t->Tuple.variables[i]->type, path);
 			if (max < align) {
 				max = align;
 			}
@@ -1565,7 +1554,7 @@ i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, Type
 
 	case Type_Map: {
 		if (t->Map.count == 0) { // Dynamic
-			return type_align_of_internal(s, allocator, t->Map.generated_struct_type, path);
+			return type_align_of_internal(allocator, t->Map.generated_struct_type, path);
 		}
 		GB_PANIC("TODO(bill): Fixed map alignment");
 	} break;
@@ -1574,19 +1563,19 @@ i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, Type
 		switch (t->Record.kind) {
 		case TypeRecord_Struct:
 			if (t->Record.custom_align > 0) {
-				return gb_clamp(t->Record.custom_align, 1, s.max_align);
+				return gb_clamp(t->Record.custom_align, 1, build_context.max_align);
 			}
 			if (t->Record.field_count > 0) {
 				// TODO(bill): What is this supposed to be?
 				if (t->Record.struct_is_packed) {
-					i64 max = s.word_size;
+					i64 max = build_context.word_size;
 					for (isize i = 0; i < t->Record.field_count; i++) {
 						Type *field_type = t->Record.fields[i]->type;
 						type_path_push(path, field_type);
 						if (path->failure) {
 							return FAILURE_ALIGNMENT;
 						}
-						i64 align = type_align_of_internal(s, allocator, field_type, path);
+						i64 align = type_align_of_internal(allocator, field_type, path);
 						type_path_pop(path);
 						if (max < align) {
 							max = align;
@@ -1599,7 +1588,7 @@ i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, Type
 				if (path->failure) {
 					return FAILURE_ALIGNMENT;
 				}
-				i64 align = type_align_of_internal(s, allocator, field_type, path);
+				i64 align = type_align_of_internal(allocator, field_type, path);
 				type_path_pop(path);
 				return align;
 			}
@@ -1613,7 +1602,7 @@ i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, Type
 				if (path->failure) {
 					return FAILURE_ALIGNMENT;
 				}
-				i64 align = type_align_of_internal(s, allocator, field_type, path);
+				i64 align = type_align_of_internal(allocator, field_type, path);
 				type_path_pop(path);
 				if (max < align) {
 					max = align;
@@ -1629,7 +1618,7 @@ i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, Type
 				if (path->failure) {
 					return FAILURE_ALIGNMENT;
 				}
-				i64 align = type_align_of_internal(s, allocator, field_type, path);
+				i64 align = type_align_of_internal(allocator, field_type, path);
 				type_path_pop(path);
 				if (max < align) {
 					max = align;
@@ -1641,43 +1630,43 @@ i64 type_align_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, Type
 	} break;
 	}
 
-	// return gb_clamp(next_pow2(type_size_of(s, allocator, t)), 1, s.max_align);
-	// NOTE(bill): Things that are bigger than s.word_size, are actually comprised of smaller types
+	// return gb_clamp(next_pow2(type_size_of(allocator, t)), 1, build_context.max_align);
+	// NOTE(bill): Things that are bigger than build_context.word_size, are actually comprised of smaller types
 	// TODO(bill): Is this correct for 128-bit types (integers)?
-	return gb_clamp(next_pow2(type_size_of_internal(s, allocator, t, path)), 1, s.word_size);
+	return gb_clamp(next_pow2(type_size_of_internal(allocator, t, path)), 1, build_context.word_size);
 }
 
-i64 *type_set_offsets_of(BaseTypeSizes s, gbAllocator allocator, Entity **fields, isize field_count, bool is_packed) {
+i64 *type_set_offsets_of(gbAllocator allocator, Entity **fields, isize field_count, bool is_packed) {
 	i64 *offsets = gb_alloc_array(allocator, i64, field_count);
 	i64 curr_offset = 0;
 	if (is_packed) {
 		for (isize i = 0; i < field_count; i++) {
 			offsets[i] = curr_offset;
-			curr_offset += type_size_of(s, allocator, fields[i]->type);
+			curr_offset += type_size_of(allocator, fields[i]->type);
 		}
 
 	} else {
 		for (isize i = 0; i < field_count; i++) {
-			i64 align = type_align_of(s, allocator, fields[i]->type);
+			i64 align = type_align_of(allocator, fields[i]->type);
 			curr_offset = align_formula(curr_offset, align);
 			offsets[i] = curr_offset;
-			curr_offset += type_size_of(s, allocator, fields[i]->type);
+			curr_offset += type_size_of(allocator, fields[i]->type);
 		}
 	}
 	return offsets;
 }
 
-bool type_set_offsets(BaseTypeSizes s, gbAllocator allocator, Type *t) {
+bool type_set_offsets(gbAllocator allocator, Type *t) {
 	t = base_type(t);
 	if (is_type_struct(t)) {
 		if (!t->Record.struct_are_offsets_set) {
-			t->Record.struct_offsets = type_set_offsets_of(s, allocator, t->Record.fields, t->Record.field_count, t->Record.struct_is_packed);
+			t->Record.struct_offsets = type_set_offsets_of(allocator, t->Record.fields, t->Record.field_count, t->Record.struct_is_packed);
 			t->Record.struct_are_offsets_set = true;
 			return true;
 		}
 	} else if (is_type_tuple(t)) {
 		if (!t->Tuple.are_offsets_set) {
-			t->Tuple.offsets = type_set_offsets_of(s, allocator, t->Tuple.variables, t->Tuple.variable_count, false);
+			t->Tuple.offsets = type_set_offsets_of(allocator, t->Tuple.variables, t->Tuple.variable_count, false);
 			t->Tuple.are_offsets_set = true;
 			return true;
 		}
@@ -1687,7 +1676,7 @@ bool type_set_offsets(BaseTypeSizes s, gbAllocator allocator, Type *t) {
 	return false;
 }
 
-i64 type_size_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypePath *path) {
+i64 type_size_of_internal(gbAllocator allocator, Type *t, TypePath *path) {
 	if (t->failure) {
 		return FAILURE_SIZE;
 	}
@@ -1701,11 +1690,11 @@ i64 type_size_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypeP
 			return size;
 		}
 		switch (kind) {
-		case Basic_string: return 2*s.word_size;
-		case Basic_any:    return 2*s.word_size;
+		case Basic_string: return 2*build_context.word_size;
+		case Basic_any:    return 2*build_context.word_size;
 
 		case Basic_int: case Basic_uint: case Basic_rawptr:
-			return s.word_size;
+			return build_context.word_size;
 		}
 	} break;
 
@@ -1715,17 +1704,17 @@ i64 type_size_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypeP
 		if (count == 0) {
 			return 0;
 		}
-		align = type_align_of_internal(s, allocator, t->Array.elem, path);
+		align = type_align_of_internal(allocator, t->Array.elem, path);
 		if (path->failure) {
 			return FAILURE_SIZE;
 		}
-		size  = type_size_of_internal(s,  allocator, t->Array.elem, path);
+		size  = type_size_of_internal( allocator, t->Array.elem, path);
 		alignment = align_formula(size, align);
 		return alignment*(count-1) + size;
 	} break;
 
 	case Type_DynamicArray:
-		return 3*s.word_size + type_size_of(s, allocator, t_allocator);
+		return 3*build_context.word_size + type_size_of(allocator, t_allocator);
 
 	case Type_Vector: {
 #if 0
@@ -1738,7 +1727,7 @@ i64 type_size_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypeP
 		if (path->failure) {
 			return FAILURE_SIZE;
 		}
-		bit_size = 8*type_size_of_internal(s, allocator, t->Vector.elem, path);
+		bit_size = 8*type_size_of_internal(allocator, t->Vector.elem, path);
 		type_path_pop(path);
 		if (is_type_boolean(t->Vector.elem)) {
 			bit_size = 1; // NOTE(bill): LLVM can store booleans as 1 bit because a boolean _is_ an `i1`
@@ -1753,11 +1742,11 @@ i64 type_size_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypeP
 		if (count == 0) {
 			return 0;
 		}
-		align = type_align_of_internal(s, allocator, t->Vector.elem, path);
+		align = type_align_of_internal(allocator, t->Vector.elem, path);
 		if (path->failure) {
 			return FAILURE_SIZE;
 		}
-		size  = type_size_of_internal(s,  allocator, t->Vector.elem, path);
+		size  = type_size_of_internal( allocator, t->Vector.elem, path);
 		alignment = align_formula(size, align);
 		return alignment*(count-1) + size;
 #endif
@@ -1765,11 +1754,11 @@ i64 type_size_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypeP
 
 
 	case Type_Slice: // ptr + count
-		return 2 * s.word_size;
+		return 2 * build_context.word_size;
 
 	case Type_Map: {
 		if (t->Map.count == 0) { // Dynamic
-			return type_size_of_internal(s, allocator, t->Map.generated_struct_type, path);
+			return type_size_of_internal(allocator, t->Map.generated_struct_type, path);
 		}
 		GB_PANIC("TODO(bill): Fixed map size");
 	}
@@ -1780,9 +1769,9 @@ i64 type_size_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypeP
 		if (count == 0) {
 			return 0;
 		}
-		align = type_align_of_internal(s, allocator, t, path);
-		type_set_offsets(s, allocator, t);
-		size = t->Tuple.offsets[count-1] + type_size_of_internal(s, allocator, t->Tuple.variables[count-1]->type, path);
+		align = type_align_of_internal(allocator, t, path);
+		type_set_offsets(allocator, t);
+		size = t->Tuple.offsets[count-1] + type_size_of_internal(allocator, t->Tuple.variables[count-1]->type, path);
 		return align_formula(size, align);
 	} break;
 
@@ -1793,44 +1782,44 @@ i64 type_size_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypeP
 			if (count == 0) {
 				return 0;
 			}
-			i64 align = type_align_of_internal(s, allocator, t, path);
+			i64 align = type_align_of_internal(allocator, t, path);
 			if (path->failure) {
 				return FAILURE_SIZE;
 			}
-			type_set_offsets(s, allocator, t);
-			i64 size = t->Record.struct_offsets[count-1] + type_size_of_internal(s, allocator, t->Record.fields[count-1]->type, path);
+			type_set_offsets(allocator, t);
+			i64 size = t->Record.struct_offsets[count-1] + type_size_of_internal(allocator, t->Record.fields[count-1]->type, path);
 			return align_formula(size, align);
 		} break;
 
 		case TypeRecord_Union: {
 			i64 count = t->Record.field_count;
-			i64 align = type_align_of_internal(s, allocator, t, path);
+			i64 align = type_align_of_internal(allocator, t, path);
 			if (path->failure) {
 				return FAILURE_SIZE;
 			}
 			i64 max = 0;
 			// NOTE(bill): Zeroth field is invalid
 			for (isize i = 1; i < count; i++) {
-				i64 size = type_size_of_internal(s, allocator, t->Record.fields[i]->type, path);
+				i64 size = type_size_of_internal(allocator, t->Record.fields[i]->type, path);
 				if (max < size) {
 					max = size;
 				}
 			}
 			// NOTE(bill): Align to int
-			isize size =  align_formula(max, s.word_size);
-			size += type_size_of_internal(s, allocator, t_int, path);
+			isize size =  align_formula(max, build_context.word_size);
+			size += type_size_of_internal(allocator, t_int, path);
 			return align_formula(size, align);
 		} break;
 
 		case TypeRecord_RawUnion: {
 			i64 count = t->Record.field_count;
-			i64 align = type_align_of_internal(s, allocator, t, path);
+			i64 align = type_align_of_internal(allocator, t, path);
 			if (path->failure) {
 				return FAILURE_SIZE;
 			}
 			i64 max = 0;
 			for (isize i = 0; i < count; i++) {
-				i64 size = type_size_of_internal(s, allocator, t->Record.fields[i]->type, path);
+				i64 size = type_size_of_internal(allocator, t->Record.fields[i]->type, path);
 				if (max < size) {
 					max = size;
 				}
@@ -1843,18 +1832,18 @@ i64 type_size_of_internal(BaseTypeSizes s, gbAllocator allocator, Type *t, TypeP
 	}
 
 	// Catch all
-	return s.word_size;
+	return build_context.word_size;
 }
 
-i64 type_offset_of(BaseTypeSizes s, gbAllocator allocator, Type *t, i32 index) {
+i64 type_offset_of(gbAllocator allocator, Type *t, i32 index) {
 	t = base_type(t);
 	if (t->kind == Type_Record && t->Record.kind == TypeRecord_Struct) {
-		type_set_offsets(s, allocator, t);
+		type_set_offsets(allocator, t);
 		if (gb_is_between(index, 0, t->Record.field_count-1)) {
 			return t->Record.struct_offsets[index];
 		}
 	} else if (t->kind == Type_Tuple) {
-		type_set_offsets(s, allocator, t);
+		type_set_offsets(allocator, t);
 		if (gb_is_between(index, 0, t->Tuple.variable_count-1)) {
 			return t->Tuple.offsets[index];
 		}
@@ -1862,32 +1851,32 @@ i64 type_offset_of(BaseTypeSizes s, gbAllocator allocator, Type *t, i32 index) {
 		if (t->Basic.kind == Basic_string) {
 			switch (index) {
 			case 0: return 0;           // data
-			case 1: return s.word_size; // count
+			case 1: return build_context.word_size; // count
 			}
 		} else if (t->Basic.kind == Basic_any) {
 			switch (index) {
 			case 0: return 0;           // type_info
-			case 1: return s.word_size; // data
+			case 1: return build_context.word_size; // data
 			}
 		}
 	} else if (t->kind == Type_Slice) {
 		switch (index) {
 		case 0: return 0;             // data
-		case 1: return 1*s.word_size; // count
+		case 1: return 1*build_context.word_size; // count
 		}
 	} else if (t->kind == Type_DynamicArray) {
 		switch (index) {
 		case 0: return 0;             // data
-		case 1: return 1*s.word_size; // count
-		case 2: return 2*s.word_size; // capacity
-		case 3: return 3*s.word_size; // allocator
+		case 1: return 1*build_context.word_size; // count
+		case 2: return 2*build_context.word_size; // capacity
+		case 3: return 3*build_context.word_size; // allocator
 		}
 	}
 	return 0;
 }
 
 
-i64 type_offset_of_from_selection(BaseTypeSizes s, gbAllocator allocator, Type *type, Selection sel) {
+i64 type_offset_of_from_selection(gbAllocator allocator, Type *type, Selection sel) {
 	GB_ASSERT(sel.indirect == false);
 
 	Type *t = type;
@@ -1895,7 +1884,7 @@ i64 type_offset_of_from_selection(BaseTypeSizes s, gbAllocator allocator, Type *
 	for_array(i, sel.index) {
 		isize index = sel.index.e[i];
 		t = base_type(t);
-		offset += type_offset_of(s, allocator, t, index);
+		offset += type_offset_of(allocator, t, index);
 		if (t->kind == Type_Record && t->Record.kind == TypeRecord_Struct) {
 			t = t->Record.fields[index]->type;
 		} else {