
Merge branch 'master' into llvm-integration

gingerBill, 5 years ago
parent commit 8ec5987ae1

+ 5 - 5
core/encoding/base64/base64.odin

@@ -49,7 +49,7 @@ encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocato
     c0, c1, c2, block: int;
 
     for i, d := 0, 0; i < length; i, d = i + 3, d + 4 {
-        c0, c1, c2 = int(data[i]), 0, 0;
+        c0, c1, c2 = int(data[i]), -1, -1;
 
         if i + 1 < length do c1 = int(data[i + 1]);
         if i + 2 < length do c2 = int(data[i + 2]);
@@ -58,13 +58,13 @@ encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocato
 
         out[d]     = ENC_TBL[block >> 18 & 63];
         out[d + 1] = ENC_TBL[block >> 12 & 63];
-        out[d + 2] = c1 == 0 ? PADDING : ENC_TBL[block >> 6 & 63];
-        out[d + 3] = c2 == 0 ? PADDING : ENC_TBL[block & 63];
+        out[d + 2] = c1 == -1 ? PADDING : ENC_TBL[block >> 6 & 63];
+        out[d + 3] = c2 == -1 ? PADDING : ENC_TBL[block & 63];
     }
     return string(out);
 }
 
-decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> []byte #no_bounds_check{
+decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> []byte #no_bounds_check {
     length := len(data);
     if length == 0 do return []byte{};
 
@@ -90,4 +90,4 @@ decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocato
         out[j + 2] = byte(b2);
     }
     return out;
-}
+}
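
The change above swaps the 0 sentinel for -1 so that input bytes which are genuinely zero are no longer mistaken for "missing" bytes and replaced with padding. A minimal sketch of the behaviour this fixes, assuming the package is imported as "core:encoding/base64" and encode keeps the signature shown in the hunk:

package main

import "core:encoding/base64"

main :: proc() {
	// "Ma\x00" ends in a real zero byte.
	data := []byte{0x4D, 0x61, 0x00};

	// Old sentinel: the zero byte was treated as absent, giving "TWE=".
	// New sentinel: the block encodes fully, giving "TWEA".
	encoded := base64.encode(data);
	assert(encoded == "TWEA");
}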

+ 41 - 60
core/os/os.odin

@@ -129,85 +129,66 @@ read_ptr :: proc(fd: Handle, data: rawptr, len: int) -> (int, Errno) {
 heap_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
-/*
+
 	//
-	// NOTE(tetra, 2019-11-10): The heap doesn't respect alignment.
-	// HACK: Overallocate, align forwards, and then use the two bytes immediately before
-	// the address we return, to store the padding we inserted.
-	// This allows us to pass the original pointer we got back from the heap to `free` later.
+	// NOTE(tetra, 2020-01-14): The heap doesn't respect alignment.
+	// Instead, we overallocate by `alignment + size_of(rawptr) - 1`, and insert
+	// padding. We also store the original pointer returned by heap_alloc right before
+	// the pointer we return to the user.
 	//
 
-	align_and_store_padding :: proc(ptr: rawptr, alignment: int) -> rawptr {
-		ptr := mem.ptr_offset(cast(^u8) ptr, 2);
-		new_ptr := cast(^u8) mem.align_forward(ptr, uintptr(alignment));
-		offset := mem.ptr_sub(new_ptr, cast(^u8) ptr) + 2;
-		assert(offset < int(max(u16)));
-		(^[2]u8)(mem.ptr_offset(new_ptr, -2))^ = transmute([2]u8) u16(offset);
-		return new_ptr;
-	}
-
-	recover_original_pointer :: proc(ptr: rawptr) -> rawptr {
-		ptr := cast(^u8) ptr;
-		offset := transmute(u16) (^[2]u8)(mem.ptr_offset(ptr, -2))^;
-		ptr = mem.ptr_offset(ptr, -int(offset));
-		return ptr;
-	}
-
-	aligned_heap_alloc :: proc(size: int, alignment: int) -> rawptr {
-		// NOTE(tetra): Alignment 1 will mean we only have one extra byte.
-		// This is not enough for a u16 - so we ensure there is at least two bytes extra.
-		// This also means that the pointer is always aligned to at least 2.
-		extra := alignment;
-		if extra <= 1 do extra = 2;
-
-		orig := cast(^u8) heap_alloc(size + extra);
-		if orig == nil do return nil;
-		ptr := align_and_store_padding(orig, alignment);
-		assert(recover_original_pointer(ptr) == orig);
-		return ptr;
-	}
+	aligned_alloc :: proc(size, alignment: int, old_ptr: rawptr = nil) -> rawptr {
+		a := max(alignment, align_of(rawptr));
+		space := size + a - 1;
 
-	switch mode {
-	case .Alloc:
-		return aligned_heap_alloc(size, alignment);
+		allocated_mem: rawptr;
+		if old_ptr != nil {
+			original_old_ptr := mem.ptr_offset((^rawptr)(old_ptr), -1)^;
+			allocated_mem = heap_resize(original_old_ptr, space+size_of(rawptr));
+		} else {
+			allocated_mem = heap_alloc(space+size_of(rawptr));
+		}
+		aligned_mem := rawptr(mem.ptr_offset((^u8)(allocated_mem), size_of(rawptr)));
 
-	case .Free:
-		if old_memory != nil {
-			ptr := recover_original_pointer(old_memory);
-			heap_free(ptr);
+		ptr := uintptr(aligned_mem);
+		aligned_ptr := (ptr - 1 + uintptr(a)) & -uintptr(a);
+		diff := int(aligned_ptr - ptr);
+		if (size + diff) > space {
+			return nil;
 		}
-		return nil;
 
-	case .Free_All:
-		// NOTE(bill): Does nothing
+		aligned_mem = rawptr(aligned_ptr);
+		mem.ptr_offset((^rawptr)(aligned_mem), -1)^ = allocated_mem;
 
-	case .Resize:
-		if old_memory == nil {
-			return aligned_heap_alloc(size, alignment);
+		return aligned_mem;
+	}
+
+	aligned_free :: proc(p: rawptr) {
+		if p != nil {
+			heap_free(mem.ptr_offset((^rawptr)(p), -1)^);
 		}
-		ptr := recover_original_pointer(old_memory);
-		ptr = heap_resize(ptr, size);
-		assert(ptr != nil);
-		return align_and_store_padding(ptr, alignment);
 	}
 
-	return nil;
-*/
+	aligned_resize :: proc(p: rawptr, old_size: int, new_size: int, new_alignment: int) -> rawptr {
+		if p == nil do return nil;
+		return aligned_alloc(new_size, new_alignment, p);
+	}
+
 	switch mode {
 	case .Alloc:
-		return heap_alloc(size);
+		return aligned_alloc(size, alignment);
 
 	case .Free:
-		if old_memory != nil {
-			heap_free(old_memory);
-		}
-		return nil;
+		aligned_free(old_memory);
 
 	case .Free_All:
-		// NOTE(bill): Does nothing
+		// NOTE(tetra): Do nothing.
 
 	case .Resize:
-		return heap_resize(old_memory, size);
+		if old_memory == nil {
+			return aligned_alloc(size, alignment);
+		}
+		return aligned_resize(old_memory, old_size, size, alignment);
 	}
 
 	return nil;
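
The commented-out scheme (store a 2-byte offset just before the returned pointer) is replaced by a simpler one: overallocate by alignment + size_of(rawptr) - 1, align the user pointer forward, and stash the pointer returned by heap_alloc in the rawptr slot immediately before it, so Free and Resize recover it with a single negative offset. The round-up itself is the usual power-of-two trick; a standalone sketch, purely illustrative and not part of the commit:

package main

import "core:fmt"

// Round ptr up to the next multiple of align (align must be a power of two),
// the same (ptr - 1 + a) & -a expression used in aligned_alloc above.
align_forward :: proc(ptr, align: uintptr) -> uintptr {
	return (ptr - 1 + align) & -align;
}

main :: proc() {
	fmt.println(align_forward(13, 8)); // 16
	fmt.println(align_forward(16, 8)); // 16
}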

+ 6 - 5
core/runtime/core.odin

@@ -1141,11 +1141,12 @@ __dynamic_array_reserve :: proc(array_: rawptr, elem_size, elem_align: int, cap:
 	allocator := array.allocator;
 
 	new_data := allocator.procedure(allocator.data, .Resize, new_size, elem_align, array.data, old_size, 0, loc);
-	if new_data == nil do return false;
-
-	array.data = new_data;
-	array.cap = cap;
-	return true;
+	if new_data != nil || elem_size == 0 {
+		array.data = new_data;
+		array.cap = cap;
+		return true;
+	}
+	return false;
 }
 
 __dynamic_array_resize :: proc(array_: rawptr, elem_size, elem_align: int, len: int, loc := #caller_location) -> bool {
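
The old code treated a nil result from the allocator as failure, but when elem_size is 0 the resize request is for 0 bytes and the allocator legitimately returns nil, so reserving capacity for a dynamic array of a zero-sized type always reported failure. A small sketch of the case this covers, assuming the built-in reserve forwards to __dynamic_array_reserve:

package main

Empty :: struct {}

main :: proc() {
	xs: [dynamic]Empty;
	// elem_size is 0, so the allocator hands back nil for the 0-byte request;
	// with the change above this is no longer reported as an allocation failure.
	reserve(&xs, 16);
}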

+ 22 - 0
src/check_expr.cpp

@@ -5280,6 +5280,10 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
 		break;
 	}
 
+	case BuiltinProc_cpu_relax:
+		operand->mode = Addressing_NoValue;
+		break;
+
 	case BuiltinProc_atomic_fence:
 	case BuiltinProc_atomic_fence_acq:
 	case BuiltinProc_atomic_fence_rel:
@@ -5987,6 +5991,15 @@ CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
 				}
 				score += s;
 
+				if (e->flags & EntityFlag_ConstInput) {
+					if (o.mode != Addressing_Constant) {
+						if (show_error) {
+							error(o.expr, "Expected a constant value for the argument '%.*s'", LIT(e->token.string));
+						}
+						err = CallArgumentError_NoneConstantParameter;
+					}
+				}
+
 				if (o.mode == Addressing_Type && is_type_typeid(e->type)) {
 					add_type_info_type(c, o.type);
 					add_type_and_value(c->info, o.expr, Addressing_Value, e->type, exact_value_typeid(o.type));
@@ -6242,6 +6255,15 @@ CALL_ARGUMENT_CHECKER(check_named_call_arguments) {
 					}
 					err = CallArgumentError_WrongTypes;
 				}
+
+				if (e->flags & EntityFlag_ConstInput) {
+					if (o->mode != Addressing_Constant) {
+						if (show_error) {
+							error(o->expr, "Expected a constant value for the argument '%.*s'", LIT(e->token.string));
+						}
+						err = CallArgumentError_NoneConstantParameter;
+					}
+				}
 			}
 			score += s;
 		}

+ 6 - 0
src/check_stmt.cpp

@@ -1108,6 +1108,12 @@ void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
 			if (type_expr != nullptr) { // Otherwise it's a default expression
 				Operand y = {};
 				check_expr_or_type(ctx, &y, type_expr);
+				if (y.mode != Addressing_Type) {
+					gbString str = expr_to_string(type_expr);
+					error(type_expr, "Expected a type as a case, got %s", str);
+					gb_string_free(str);
+					continue;
+				}
 
 				if (switch_kind == TypeSwitch_Union) {
 					GB_ASSERT(is_type_union(bt));
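
The added check rejects a non-type expression used as a type-switch case with a proper diagnostic instead of letting it reach the union/any handling below. A sketch of code that now triggers the new error (intentionally invalid):

package main

main :: proc() {
	value: any = 123;
	switch v in value {
	case int:
		_ = v;
	case "not a type": // error: Expected a type as a case, got "not a type"
	}
}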

+ 4 - 1
src/check_type.cpp

@@ -1722,8 +1722,11 @@ Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_params, bool *is
 			if (p->flags&FieldFlag_auto_cast) {
 				param->flags |= EntityFlag_AutoCast;
 			}
-			param->state = EntityState_Resolved; // NOTE(bill): This should have be resolved whilst determining it
+			if (p->flags&FieldFlag_const) {
+				param->flags |= EntityFlag_ConstInput;
+			}
 
+			param->state = EntityState_Resolved; // NOTE(bill): This should have be resolved whilst determining it
 			add_entity(ctx->checker, scope, name, param);
 			if (is_using) {
 				add_entity_use(ctx, name, param);

+ 4 - 0
src/checker_builtin_procs.hpp

@@ -36,6 +36,8 @@ enum BuiltinProcId {
 	BuiltinProc_simd_vector,
 	BuiltinProc_soa_struct,
 
+	BuiltinProc_cpu_relax,
+
 	BuiltinProc_atomic_fence,
 	BuiltinProc_atomic_fence_acq,
 	BuiltinProc_atomic_fence_rel,
@@ -214,6 +216,8 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
 	{STR_LIT("simd_vector"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics}, // Type
 	{STR_LIT("soa_struct"),  2, false, Expr_Expr, BuiltinProcPkg_intrinsics}, // Type
 
+	{STR_LIT("cpu_relax"),  0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
+
 	{STR_LIT("atomic_fence"),        0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
 	{STR_LIT("atomic_fence_acq"),    0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
 	{STR_LIT("atomic_fence_rel"),    0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},

+ 1 - 0
src/entity.cpp

@@ -47,6 +47,7 @@ enum EntityFlag {
 	EntityFlag_BitFieldValue = 1<<12,
 	EntityFlag_PolyConst     = 1<<13,
 	EntityFlag_NotExported   = 1<<14,
+	EntityFlag_ConstInput    = 1<<15,
 
 	EntityFlag_Static        = 1<<16,
 

+ 12 - 0
src/ir.cpp

@@ -197,6 +197,7 @@ gbAllocator ir_allocator(void) {
 	IR_INSTR_KIND(ZeroInit, struct { irValue *address; })             \
 	IR_INSTR_KIND(Store,    struct { irValue *address, *value; bool is_volatile; }) \
 	IR_INSTR_KIND(Load,     struct { Type *type; irValue *address; i64 custom_align; }) \
+	IR_INSTR_KIND(InlineCode, struct { BuiltinProcId id; Array<irValue *> operands; }) \
 	IR_INSTR_KIND(AtomicFence, struct { BuiltinProcId id; })          \
 	IR_INSTR_KIND(AtomicStore, struct {                               \
 		irValue *address, *value;                                     \
@@ -1063,6 +1064,14 @@ irValue *ir_instr_load(irProcedure *p, irValue *address) {
 	return v;
 }
 
+irValue *ir_instr_inline_code(irProcedure *p, BuiltinProcId id, Array<irValue *> operands) {
+	irValue *v = ir_alloc_instr(p, irInstr_InlineCode);
+	irInstr *i = &v->Instr;
+	i->InlineCode.id = id;
+	i->InlineCode.operands = operands;
+	return v;
+}
+
 irValue *ir_instr_atomic_fence(irProcedure *p, BuiltinProcId id) {
 	irValue *v = ir_alloc_instr(p, irInstr_AtomicFence);
 	irInstr *i = &v->Instr;
@@ -6886,6 +6895,9 @@ irValue *ir_build_builtin_proc(irProcedure *proc, Ast *expr, TypeAndValue tv, Bu
 
 
 	// "Intrinsics"
+	case BuiltinProc_cpu_relax:
+		return ir_emit(proc, ir_instr_inline_code(proc, id, {}));
+
 	case BuiltinProc_atomic_fence:
 	case BuiltinProc_atomic_fence_acq:
 	case BuiltinProc_atomic_fence_rel:

+ 12 - 0
src/ir_print.cpp

@@ -1482,6 +1482,18 @@ void ir_print_instr(irFileBuffer *f, irModule *m, irValue *value) {
 		break;
 	}
 
+	case irInstr_InlineCode:
+		{
+			switch (instr->InlineCode.id) {
+			case BuiltinProc_cpu_relax:
+				ir_write_str_lit(f, "call void asm sideeffect \"pause\", \"\"()");
+				break;
+			default: GB_PANIC("Unknown inline code %d", instr->InlineCode.id); break;
+			}
+		}
+		break;
+
+
 	case irInstr_AtomicFence:
 		ir_write_str_lit(f, "fence ");
 		switch (instr->AtomicFence.id) {
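
Together with the checker and IR changes earlier in the commit, this wires up a cpu_relax intrinsic that is printed as the inline-assembly x86 pause instruction, intended for spin-wait loops. A hedged usage sketch; the bare "intrinsics" import path is an assumption based on the BuiltinProcPkg_intrinsics entry:

package main

import "intrinsics"

// Busy-wait for a bounded number of iterations, hinting the CPU that this is a spin loop.
spin :: proc(iterations: int) {
	for i := 0; i < iterations; i += 1 {
		intrinsics.cpu_relax(); // printed as: call void asm sideeffect "pause", ""()
	}
}

main :: proc() {
	spin(64);
}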

+ 8 - 0
src/parser.cpp

@@ -2998,6 +2998,7 @@ enum FieldPrefixKind {
 	FieldPrefix_Invalid = 0,
 
 	FieldPrefix_using,
+	FieldPrefix_const,
 	FieldPrefix_no_alias,
 	FieldPrefix_c_var_arg,
 	FieldPrefix_auto_cast,
@@ -3024,6 +3025,9 @@ FieldPrefixKind is_token_field_prefix(AstFile *f) {
 				return FieldPrefix_c_var_arg;
 			}
 			break;
+
+		case Token_const:
+			return FieldPrefix_const;
 		}
 		return FieldPrefix_Unknown;
 	}
@@ -3036,6 +3040,7 @@ u32 parse_field_prefixes(AstFile *f) {
 	i32 no_alias_count  = 0;
 	i32 c_vararg_count  = 0;
 	i32 auto_cast_count = 0;
+	i32 const_count     = 0;
 
 	for (;;) {
 		FieldPrefixKind kind = is_token_field_prefix(f);
@@ -3053,12 +3058,14 @@ u32 parse_field_prefixes(AstFile *f) {
 		case FieldPrefix_no_alias:  no_alias_count  += 1; advance_token(f); break;
 		case FieldPrefix_c_var_arg: c_vararg_count  += 1; advance_token(f); break;
 		case FieldPrefix_auto_cast: auto_cast_count += 1; advance_token(f); break;
+		case FieldPrefix_const:     const_count     += 1; advance_token(f); break;
 		}
 	}
 	if (using_count     > 1) syntax_error(f->curr_token, "Multiple 'using' in this field list");
 	if (no_alias_count  > 1) syntax_error(f->curr_token, "Multiple '#no_alias' in this field list");
 	if (c_vararg_count  > 1) syntax_error(f->curr_token, "Multiple '#c_vararg' in this field list");
 	if (auto_cast_count > 1) syntax_error(f->curr_token, "Multiple 'auto_cast' in this field list");
+	if (const_count     > 1) syntax_error(f->curr_token, "Multiple '#const' in this field list");
 
 
 	u32 field_flags = 0;
@@ -3066,6 +3073,7 @@ u32 parse_field_prefixes(AstFile *f) {
 	if (no_alias_count  > 0) field_flags |= FieldFlag_no_alias;
 	if (c_vararg_count  > 0) field_flags |= FieldFlag_c_vararg;
 	if (auto_cast_count > 0) field_flags |= FieldFlag_auto_cast;
+	if (const_count     > 0) field_flags |= FieldFlag_const;
 	return field_flags;
 }
 

+ 2 - 1
src/parser.hpp

@@ -203,12 +203,13 @@ enum FieldFlag {
 	FieldFlag_no_alias  = 1<<2,
 	FieldFlag_c_vararg  = 1<<3,
 	FieldFlag_auto_cast = 1<<4,
+	FieldFlag_const     = 1<<5,
 
 	FieldFlag_Tags = 1<<10,
 
 	FieldFlag_Results   = 1<<16,
 
-	FieldFlag_Signature = FieldFlag_ellipsis|FieldFlag_using|FieldFlag_no_alias|FieldFlag_c_vararg|FieldFlag_auto_cast,
+	FieldFlag_Signature = FieldFlag_ellipsis|FieldFlag_using|FieldFlag_no_alias|FieldFlag_c_vararg|FieldFlag_auto_cast|FieldFlag_const,
 	FieldFlag_Struct    = FieldFlag_using|FieldFlag_Tags,
 };