Begin work on supporting `wasm64`; Correct `wasm32` compilation behaviour

gingerBill 4 years ago
commit 5bc8a491a7

+ 156 - 142
core/runtime/default_temporary_allocator.odin

@@ -17,175 +17,189 @@ Default_Temp_Allocator :: struct {
 	leaked_allocations: [dynamic][]byte,
 }
 
-default_temp_allocator_init :: proc(s: ^Default_Temp_Allocator, size: int, backup_allocator := context.allocator) {
-	s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator)
-	s.curr_offset = 0
-	s.prev_allocation = nil
-	s.backup_allocator = backup_allocator
-	s.leaked_allocations.allocator = backup_allocator
-}
-
-default_temp_allocator_destroy :: proc(s: ^Default_Temp_Allocator) {
-	if s == nil {
-		return
-	}
-	for ptr in s.leaked_allocations {
-		free(raw_data(ptr), s.backup_allocator)
+when ODIN_OS == "freestanding" {
+	default_temp_allocator_init :: proc(s: ^Default_Temp_Allocator, size: int, backup_allocator := context.allocator) {
+	}
+	
+	default_temp_allocator_destroy :: proc(s: ^Default_Temp_Allocator) {
+	}
+	
+	default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
+	                                    size, alignment: int,
+	                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> (data: []byte, err: Allocator_Error) {
+		return nil, nil
+	}
+} else {
+	default_temp_allocator_init :: proc(s: ^Default_Temp_Allocator, size: int, backup_allocator := context.allocator) {
+		s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator)
+		s.curr_offset = 0
+		s.prev_allocation = nil
+		s.backup_allocator = backup_allocator
+		s.leaked_allocations.allocator = backup_allocator
 	}
-	delete(s.leaked_allocations)
-	delete(s.data, s.backup_allocator)
-	s^ = {}
-}
-
-@(private)
-default_temp_allocator_alloc :: proc(s: ^Default_Temp_Allocator, size, alignment: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
-	size := size
-	size = align_forward_int(size, alignment)
-
-	switch {
-	case s.curr_offset+size <= len(s.data):
-		start := uintptr(raw_data(s.data))
-		ptr := start + uintptr(s.curr_offset)
-		ptr = align_forward_uintptr(ptr, uintptr(alignment))
-		mem_zero(rawptr(ptr), size)
 
-		s.prev_allocation = rawptr(ptr)
-		offset := int(ptr - start)
-		s.curr_offset = offset + size
-		return byte_slice(rawptr(ptr), size), .None
+	default_temp_allocator_destroy :: proc(s: ^Default_Temp_Allocator) {
+		if s == nil {
+			return
+		}
+		for ptr in s.leaked_allocations {
+			free(raw_data(ptr), s.backup_allocator)
+		}
+		delete(s.leaked_allocations)
+		delete(s.data, s.backup_allocator)
+		s^ = {}
+	}
+
+	@(private)
+	default_temp_allocator_alloc :: proc(s: ^Default_Temp_Allocator, size, alignment: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
+		size := size
+		size = align_forward_int(size, alignment)
+
+		switch {
+		case s.curr_offset+size <= len(s.data):
+			start := uintptr(raw_data(s.data))
+			ptr := start + uintptr(s.curr_offset)
+			ptr = align_forward_uintptr(ptr, uintptr(alignment))
+			mem_zero(rawptr(ptr), size)
+
+			s.prev_allocation = rawptr(ptr)
+			offset := int(ptr - start)
+			s.curr_offset = offset + size
+			return byte_slice(rawptr(ptr), size), .None
+
+		case size <= len(s.data):
+			start := uintptr(raw_data(s.data))
+			ptr := align_forward_uintptr(start, uintptr(alignment))
+			mem_zero(rawptr(ptr), size)
+
+			s.prev_allocation = rawptr(ptr)
+			offset := int(ptr - start)
+			s.curr_offset = offset + size
+			return byte_slice(rawptr(ptr), size), .None
+		}
+		a := s.backup_allocator
+		if a.procedure == nil {
+			a = context.allocator
+			s.backup_allocator = a
+		}
 
-	case size <= len(s.data):
-		start := uintptr(raw_data(s.data))
-		ptr := align_forward_uintptr(start, uintptr(alignment))
-		mem_zero(rawptr(ptr), size)
+		data, err := mem_alloc_bytes(size, alignment, a, loc)
+		if err != nil {
+			return data, err
+		}
+		if s.leaked_allocations == nil {
+			s.leaked_allocations = make([dynamic][]byte, a)
+		}
+		append(&s.leaked_allocations, data)
 
-		s.prev_allocation = rawptr(ptr)
-		offset := int(ptr - start)
-		s.curr_offset = offset + size
-		return byte_slice(rawptr(ptr), size), .None
-	}
-	a := s.backup_allocator
-	if a.procedure == nil {
-		a = context.allocator
-		s.backup_allocator = a
-	}
+		// TODO(bill): Should leaks be notified about?
+		if logger := context.logger; logger.lowest_level <= .Warning {
+			if logger.procedure != nil {
+				logger.procedure(logger.data, .Warning, "default temp allocator resorted to backup_allocator", logger.options, loc)
+			}
+		}
 
-	data, err := mem_alloc_bytes(size, alignment, a, loc)
-	if err != nil {
-		return data, err
-	}
-	if s.leaked_allocations == nil {
-		s.leaked_allocations = make([dynamic][]byte, a)
+		return data, .None
 	}
-	append(&s.leaked_allocations, data)
 
-	// TODO(bill): Should leaks be notified about?
-	if logger := context.logger; logger.lowest_level <= .Warning {
-		if logger.procedure != nil {
-			logger.procedure(logger.data, .Warning, "default temp allocator resorted to backup_allocator" , logger.options, loc)
+	@(private)
+	default_temp_allocator_free :: proc(s: ^Default_Temp_Allocator, old_memory: rawptr, loc := #caller_location) -> Allocator_Error {
+		if old_memory == nil {
+			return .None
 		}
-	}
 
-	return data, .None
-}
-
-@(private)
-default_temp_allocator_free :: proc(s: ^Default_Temp_Allocator, old_memory: rawptr, loc := #caller_location) -> Allocator_Error {
-	if old_memory == nil {
-		return .None
-	}
-
-	start := uintptr(raw_data(s.data))
-	end := start + uintptr(len(s.data))
-	old_ptr := uintptr(old_memory)
+		start := uintptr(raw_data(s.data))
+		end := start + uintptr(len(s.data))
+		old_ptr := uintptr(old_memory)
 
-	if s.prev_allocation == old_memory {
-		s.curr_offset = int(uintptr(s.prev_allocation) - start)
-		s.prev_allocation = nil
-		return .None
-	}
+		if s.prev_allocation == old_memory {
+			s.curr_offset = int(uintptr(s.prev_allocation) - start)
+			s.prev_allocation = nil
+			return .None
+		}
 
-	if start <= old_ptr && old_ptr < end {
-		// NOTE(bill): Cannot free this pointer but it is valid
-		return .None
-	}
+		if start <= old_ptr && old_ptr < end {
+			// NOTE(bill): Cannot free this pointer but it is valid
+			return .None
+		}
 
-	if len(s.leaked_allocations) != 0 {
-		for data, i in s.leaked_allocations {
-			ptr := raw_data(data)
-			if ptr == old_memory {
-				free(ptr, s.backup_allocator)
-				ordered_remove(&s.leaked_allocations, i)
-				return .None
+		if len(s.leaked_allocations) != 0 {
+			for data, i in s.leaked_allocations {
+				ptr := raw_data(data)
+				if ptr == old_memory {
+					free(ptr, s.backup_allocator)
+					ordered_remove(&s.leaked_allocations, i)
+					return .None
+				}
 			}
 		}
+		return .Invalid_Pointer
+		// panic("invalid pointer passed to default_temp_allocator");
 	}
-	return .Invalid_Pointer
-	// panic("invalid pointer passed to default_temp_allocator");
-}
 
-@(private)
-default_temp_allocator_free_all :: proc(s: ^Default_Temp_Allocator, loc := #caller_location) {
-	s.curr_offset = 0
-	s.prev_allocation = nil
-	for data in s.leaked_allocations {
-		free(raw_data(data), s.backup_allocator)
-	}
-	clear(&s.leaked_allocations)
-}
-
-@(private)
-default_temp_allocator_resize :: proc(s: ^Default_Temp_Allocator, old_memory: rawptr, old_size, size, alignment: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
-	begin := uintptr(raw_data(s.data))
-	end := begin + uintptr(len(s.data))
-	old_ptr := uintptr(old_memory)
-	if old_memory == s.prev_allocation && old_ptr & uintptr(alignment)-1 == 0 {
-		if old_ptr+uintptr(size) < end {
-			s.curr_offset = int(old_ptr-begin)+size
-			return byte_slice(old_memory, size), .None
+	@(private)
+	default_temp_allocator_free_all :: proc(s: ^Default_Temp_Allocator, loc := #caller_location) {
+		s.curr_offset = 0
+		s.prev_allocation = nil
+		for data in s.leaked_allocations {
+			free(raw_data(data), s.backup_allocator)
 		}
+		clear(&s.leaked_allocations)
+	}
+
+	@(private)
+	default_temp_allocator_resize :: proc(s: ^Default_Temp_Allocator, old_memory: rawptr, old_size, size, alignment: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
+		begin := uintptr(raw_data(s.data))
+		end := begin + uintptr(len(s.data))
+		old_ptr := uintptr(old_memory)
+		if old_memory == s.prev_allocation && old_ptr & (uintptr(alignment)-1) == 0 {
+			if old_ptr+uintptr(size) < end {
+				s.curr_offset = int(old_ptr-begin)+size
+				return byte_slice(old_memory, size), .None
+			}
+		}
+		data, err := default_temp_allocator_alloc(s, size, alignment, loc)
+		if err == .None {
+			copy(data, byte_slice(old_memory, old_size))
+			err = default_temp_allocator_free(s, old_memory, loc)
+		}
+		return data, err
 	}
-	data, err := default_temp_allocator_alloc(s, size, alignment, loc)
-	if err == .None {
-		copy(data, byte_slice(old_memory, old_size))
-		err = default_temp_allocator_free(s, old_memory, loc)
-	}
-	return data, err
-}
 
-default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
-                                    size, alignment: int,
-                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> (data: []byte, err: Allocator_Error) {
+	default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
+	                                    size, alignment: int,
+	                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> (data: []byte, err: Allocator_Error) {
 
-	s := (^Default_Temp_Allocator)(allocator_data)
+		s := (^Default_Temp_Allocator)(allocator_data)
 
-	if s.data == nil {
-		default_temp_allocator_init(s, DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE, default_allocator())
-	}
+		if s.data == nil {
+			default_temp_allocator_init(s, DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE, default_allocator())
+		}
+
+		switch mode {
+		case .Alloc:
+			data, err = default_temp_allocator_alloc(s, size, alignment, loc)
+		case .Free:
+			err = default_temp_allocator_free(s, old_memory, loc)
 
-	switch mode {
-	case .Alloc:
-		data, err = default_temp_allocator_alloc(s, size, alignment, loc)
-	case .Free:
-		err = default_temp_allocator_free(s, old_memory, loc)
+		case .Free_All:
+			default_temp_allocator_free_all(s, loc)
 
-	case .Free_All:
-		default_temp_allocator_free_all(s, loc)
+		case .Resize:
+			data, err = default_temp_allocator_resize(s, old_memory, old_size, size, alignment, loc)
 
-	case .Resize:
-		data, err = default_temp_allocator_resize(s, old_memory, old_size, size, alignment, loc)
+		case .Query_Features:
+			set := (^Allocator_Mode_Set)(old_memory)
+			if set != nil {
+				set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
+			}
 
-	case .Query_Features:
-		set := (^Allocator_Mode_Set)(old_memory)
-		if set != nil {
-			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features}
+		case .Query_Info:
+			// Nothing to give
 		}
 
-	case .Query_Info:
-		// Nothing to give
+		return
 	}
-
-	return
 }
 
 default_temp_allocator :: proc(allocator: ^Default_Temp_Allocator) -> Allocator {
@@ -193,4 +207,4 @@ default_temp_allocator :: proc(allocator: ^Default_Temp_Allocator) -> Allocator
 		procedure = default_temp_allocator_proc,
 		data = allocator,
 	}
-}
+}

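The runtime diff above gates the entire temporary allocator behind `when ODIN_OS == "freestanding"`: freestanding targets (such as the new wasm targets without an OS) get no-op stubs, while all other targets keep the ring-buffer implementation backed by `backup_allocator`. A minimal usage sketch of the non-freestanding path (standard Odin calls, not part of this commit):

```odin
package temp_alloc_example

import "core:fmt"

main :: proc() {
	// Allocations made through context.temp_allocator come out of
	// Default_Temp_Allocator's backing buffer; once it is exhausted,
	// default_temp_allocator_alloc falls back to the backup allocator
	// and tracks the block in leaked_allocations.
	s := fmt.tprintf("value = %d", 42) // tprint* procedures use the temp allocator
	fmt.println(s)

	// .Free_All resets curr_offset and frees any leaked allocations.
	free_all(context.temp_allocator)
}
```
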
+ 27 - 3
src/build_settings.cpp

@@ -29,6 +29,7 @@ enum TargetArchKind {
 	TargetArch_386,
 	TargetArch_arm64,
 	TargetArch_wasm32,
+	TargetArch_wasm64,
 
 	TargetArch_COUNT,
 };
@@ -59,6 +60,7 @@ String target_arch_names[TargetArch_COUNT] = {
 	str_lit("386"),
 	str_lit("arm64"),
 	str_lit("wasm32"),
+	str_lit("wasm64"),
 };
 
 String target_endian_names[TargetEndian_COUNT] = {
@@ -72,6 +74,7 @@ TargetEndianKind target_endians[TargetArch_COUNT] = {
 	TargetEndian_Little,
 	TargetEndian_Little,
 	TargetEndian_Little,
+	TargetEndian_Little,
 };
 
 #ifndef ODIN_VERSION_RAW
@@ -335,6 +338,16 @@ gb_global TargetMetrics target_freestanding_wasm32 = {
 	str_lit(""),
 };
 
+gb_global TargetMetrics target_freestanding_wasm64 = {
+	TargetOs_freestanding,
+	TargetArch_wasm64,
+	8,
+	16,
+	str_lit("wasm64-freestanding-js"),
+	str_lit(""),
+};
+
+
 
 
 struct NamedTargetMetrics {
@@ -353,6 +366,7 @@ gb_global NamedTargetMetrics named_targets[] = {
 	{ str_lit("freebsd_386"),    &target_freebsd_386    },
 	{ str_lit("freebsd_amd64"),  &target_freebsd_amd64  },
 	{ str_lit("freestanding_wasm32"), &target_freestanding_wasm32 },
+	{ str_lit("freestanding_wasm64"), &target_freestanding_wasm64 },
 };
 
 NamedTargetMetrics *selected_target_metrics;
@@ -458,11 +472,21 @@ bool find_library_collection_path(String name, String *path) {
 }
 
 bool is_arch_wasm(void) {
-	return build_context.metrics.arch == TargetArch_wasm32;
+	switch (build_context.metrics.arch) {
+	case TargetArch_wasm32:
+	case TargetArch_wasm64:
+		return true;
+	}
+	return false;
 }
 
 bool allow_check_foreign_filepath(void) {
-	return build_context.metrics.arch != TargetArch_wasm32;
+	switch (build_context.metrics.arch) {
+	case TargetArch_wasm32:
+	case TargetArch_wasm64:
+		return false;
+	}
+	return true;
 }
 
 
@@ -870,7 +894,7 @@ void init_build_context(TargetMetrics *cross_target) {
 			break;
 		}
 
-	} else if (bc->metrics.arch == TargetArch_wasm32) {
+	} else if (is_arch_wasm()) {
 		bc->link_flags = str_lit("--no-entry --export-table --export-all --allow-undefined ");
 	} else {
 		gb_printf_err("Compiler Error: Unsupported architecture\n");

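With `target_freestanding_wasm64` added to `named_targets`, the new target is selectable like any other, and `is_arch_wasm`/`allow_check_foreign_filepath` now branch on both wasm architectures instead of hard-coding `wasm32`. A hedged sketch of user code conditioned on the new architecture (the `-target:` flag spelling and `ODIN_ARCH` reporting `"wasm64"` are assumptions based on `target_arch_names`, not shown in this diff):

```odin
package arch_example

// Built with e.g. `odin build . -target:freestanding_wasm64`.
when ODIN_ARCH == "wasm64" {
	POINTER_BYTES :: 8 // matches the word size of target_freestanding_wasm64
} else when ODIN_ARCH == "wasm32" {
	POINTER_BYTES :: 4
} else {
	POINTER_BYTES :: size_of(rawptr)
}
```
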
+ 12 - 4
src/llvm_abi.cpp

@@ -1061,19 +1061,27 @@ LB_ABI_INFO(lb_get_abi_info) {
 		}
 	}
 
-	if (build_context.metrics.arch == TargetArch_amd64) {
+	switch (build_context.metrics.arch) {
+	case TargetArch_amd64:
 		if (build_context.metrics.os == TargetOs_windows) {
 			return lbAbiAmd64Win64::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
 		} else {
 			return lbAbiAmd64SysV::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
 		}
-	} else if (build_context.metrics.arch == TargetArch_386) {
+	case TargetArch_386:
 		return lbAbi386::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
-	} else if (build_context.metrics.arch == TargetArch_arm64) {
+	case TargetArch_arm64:
 		return lbAbiArm64::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
-	} else if (build_context.metrics.arch == TargetArch_wasm32) {
+	case TargetArch_wasm32:
+		// TODO(bill): implement wasm32's ABI correctly
+		// NOTE(bill): this ABI is only an issue for WASI compatibility
 		return lbAbi386::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
+	case TargetArch_wasm64:
+		// TODO(bill): implement wasm64's ABI correctly
+		// NOTE(bill): this ABI is only an issue for WASI compatibility
+		return lbAbiAmd64SysV::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
 	}
+
 	GB_PANIC("Unsupported ABI");
 	return {};
 }

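Reusing `lbAbiAmd64SysV` for `wasm64` mirrors the existing `wasm32` borrowing of `lbAbi386`: in each case a C ABI with the matching pointer width stands in until a dedicated wasm ABI exists, which matters mainly when values cross a foreign boundary such as a WASI host. A sketch of the kind of signature this classification governs (`@(export)` and the `"c"` calling convention are standard Odin; the host-side contract is assumed):

```odin
package abi_example

Pair :: struct {
	a, b: i32,
}

// The ABI chosen in lb_get_abi_info decides how `p` is lowered here:
// in registers, split into scalars, or passed by pointer.
@(export)
sum_pair :: proc "c" (p: Pair) -> i32 {
	return p.a + p.b
}
```
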
+ 1 - 0
src/llvm_backend.cpp

@@ -1148,6 +1148,7 @@ void lb_generate_code(lbGenerator *gen) {
 		LLVMInitializeAArch64Disassembler();
 		break;
 	case TargetArch_wasm32:
+	case TargetArch_wasm64:
 		LLVMInitializeWebAssemblyTargetInfo();
 		LLVMInitializeWebAssemblyTarget();
 		LLVMInitializeWebAssemblyTargetMC();

+ 2 - 0
src/llvm_backend_expr.cpp

@@ -496,6 +496,7 @@ bool lb_is_matrix_simdable(Type *t) {
 		break;
 	case TargetArch_386:
 	case TargetArch_wasm32:
+	case TargetArch_wasm64:
 		// nope
 		return false;
 	}
@@ -513,6 +514,7 @@ bool lb_is_matrix_simdable(Type *t) {
 				return true;
 			case TargetArch_386:
 			case TargetArch_wasm32:
+			case TargetArch_wasm64:
 				return false;
 			}
 		}

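`lb_is_matrix_simdable` now rejects `wasm64` alongside `386` and `wasm32`, so matrix arithmetic on these targets lowers to scalar code rather than SIMD vectors. A small sketch of code affected by that decision:

```odin
package matrix_example

main :: proc() {
	// On 386/wasm32/wasm64 lb_is_matrix_simdable returns false, and
	// this multiply compiles to plain scalar multiplies and adds.
	m := matrix[2, 2]f32{
		1, 2,
		3, 4,
	}
	_ = m * m
}
```
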
+ 1 - 0
src/llvm_backend_utility.cpp

@@ -1504,6 +1504,7 @@ lbValue lb_emit_mul_add(lbProcedure *p, lbValue a, lbValue b, lbValue c, Type *t
 			break;
 		case TargetArch_386:
 		case TargetArch_wasm32:
+		case TargetArch_wasm64:
 			is_possible = false;
 			break;
 		}

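`lb_emit_mul_add` likewise opts `wasm64` out of hardware fused multiply-add. A sketch of code reaching this path, assuming the `fused_mul_add` intrinsic in `core:intrinsics` is the user-facing entry point:

```odin
package fma_example

import "core:intrinsics"

main :: proc() {
	// On 386/wasm32/wasm64, is_possible is false in lb_emit_mul_add,
	// so this lowers to a separate multiply followed by an add.
	x := intrinsics.fused_mul_add(2.0, 3.0, 4.0)
	_ = x
}
```
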
+ 13 - 6
src/main.cpp

@@ -135,13 +135,20 @@ i32 linker_stage(lbGenerator *gen) {
 
 	if (is_arch_wasm()) {
 		timings_start_section(timings, str_lit("wasm-ld"));
-		result = system_exec_command_line_app("wasm-ld",
-			"\"%.*s\\bin\\wasm-ld\" \"%.*s.wasm-obj\" -o \"%.*s.wasm\" %.*s %.*s",
-			LIT(build_context.ODIN_ROOT),
-			LIT(output_base), LIT(output_base), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
-		if (result) {
-			return result;
+		
+		if (build_context.metrics.arch == TargetArch_wasm32) {
+			result = system_exec_command_line_app("wasm-ld",
+				"\"%.*s\\bin\\wasm-ld\" \"%.*s.wasm.o\" -o \"%.*s.wasm\" %.*s %.*s",
+				LIT(build_context.ODIN_ROOT),
+				LIT(output_base), LIT(output_base), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
+		} else {
+			GB_ASSERT(build_context.metrics.arch == TargetArch_wasm64);
+			result = system_exec_command_line_app("wasm-ld",
+				"\"%.*s\\bin\\wasm-ld\" \"%.*s.wasm.o\" -o \"%.*s.wasm\" %.*s %.*s",
+				LIT(build_context.ODIN_ROOT),
+				LIT(output_base), LIT(output_base), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
 		}
+		return result;
 	}
 
 	if (build_context.cross_compiling && selected_target_metrics->metrics == &target_essence_amd64) {