
Merge branch 'master' into tilde

gingerBill committed 2 years ago · commit b495a302b0
55 changed files with 1077 additions and 887 deletions
  1. build.bat (+6 -3)
  2. build_odin.sh (+7 -4)
  3. core/compress/zlib/zlib.odin (+1 -0)
  4. core/encoding/entity/entity.odin (+17 -20)
  5. core/encoding/xml/debug_print.odin (+21 -23)
  6. core/encoding/xml/example/xml_example.odin (+3 -3)
  7. core/encoding/xml/helpers.odin (+17 -12)
  8. core/encoding/xml/tokenizer.odin (+16 -16)
  9. core/encoding/xml/xml_reader.odin (+15 -27)
  10. core/fmt/fmt.odin (+11 -11)
  11. core/image/netpbm/helpers.odin (+8 -7)
  12. core/image/netpbm/netpbm.odin (+1 -0)
  13. core/image/png/helpers.odin (+3 -4)
  14. core/image/png/png.odin (+8 -8)
  15. core/math/math.odin (+12 -12)
  16. core/mem/allocators.odin (+52 -52)
  17. core/net/url.odin (+35 -35)
  18. core/odin/printer/visit.odin (+86 -92)
  19. core/odin/tokenizer/tokenizer.odin (+16 -16)
  20. core/runtime/dynamic_map_internal.odin (+118 -98)
  21. core/text/i18n/i18n.odin (+2 -0)
  22. core/text/i18n/qt_linguist.odin (+34 -8)
  23. core/thread/thread_windows.odin (+2 -2)
  24. core/time/time.odin (+19 -17)
  25. examples/demo/demo.odin (+1 -0)
  26. src/build_settings.cpp (+39 -3)
  27. src/check_builtin.cpp (+1 -1)
  28. src/check_decl.cpp (+10 -33)
  29. src/check_expr.cpp (+14 -34)
  30. src/check_stmt.cpp (+11 -6)
  31. src/check_type.cpp (+6 -0)
  32. src/checker.cpp (+63 -36)
  33. src/checker.hpp (+3 -2)
  34. src/entity.cpp (+3 -4)
  35. src/exact_value.cpp (+2 -5)
  36. src/llvm_backend_general.cpp (+1 -2)
  37. src/llvm_backend_proc.cpp (+1 -4)
  38. src/llvm_backend_stmt.cpp (+1 -4)
  39. src/llvm_backend_type.cpp (+0 -1)
  40. src/main.cpp (+89 -39)
  41. src/parser.cpp (+127 -33)
  42. src/parser.hpp (+2 -0)
  43. src/tokenizer.cpp (+2 -2)
  44. src/types.cpp (+14 -10)
  45. tests/core/encoding/hxa/test_core_hxa.odin (+14 -20)
  46. tests/core/encoding/xml/test_core_xml.odin (+33 -36)
  47. tests/core/math/linalg/glsl/test_linalg_glsl_math.odin (+4 -10)
  48. tests/core/math/test_core_math.odin (+73 -94)
  49. tests/core/path/filepath/test_core_filepath.odin (+6 -12)
  50. tests/core/reflect/test_core_reflect.odin (+16 -20)
  51. tests/core/text/i18n/test_core_text_i18n.odin (+2 -6)
  52. tests/issues/run.bat (+1 -0)
  53. tests/issues/run.sh (+1 -0)
  54. tests/issues/test_issue_2666.odin (+26 -0)
  55. vendor/fontstash/fontstash.odin (+1 -0)

+ 6 - 3
build.bat

@@ -51,7 +51,10 @@ set compiler_flags= -nologo -Oi -TP -fp:precise -Gm- -MP -FC -EHsc- -GR- -GF
 set compiler_defines= -DODIN_VERSION_RAW=\"%odin_version_raw%\"
 
 if not exist .git\ goto skip_git_hash
-for /f %%i in ('git rev-parse --short HEAD') do set GIT_SHA=%%i
+for /f "tokens=1,2" %%i IN ('git show "--pretty=%%cd %%h" "--date=format:%%Y-%%m" --no-patch --no-notes HEAD') do (
+	set odin_version_raw=dev-%%i
+	set GIT_SHA=%%j
+)
 if %ERRORLEVEL% equ 0 set compiler_defines=%compiler_defines% -DGIT_SHA=\"%GIT_SHA%\"
 :skip_git_hash
 
@@ -75,8 +78,8 @@ set compiler_includes= ^
 set libs= ^
 	kernel32.lib ^
 	Synchronization.lib ^
-	src\tilde\tb.lib ^
-	bin\llvm\windows\LLVM-C.lib
+	bin\llvm\windows\LLVM-C.lib ^
+	src\tilde\tb.lib
 
 set linker_flags= -incremental:no -opt:ref -subsystem:console
 

+ 7 - 4
build_odin.sh

@@ -8,17 +8,20 @@ set -eu
 : ${ODIN_VERSION=dev-$(date +"%Y-%m")}
 : ${GIT_SHA=}
 
-CPPFLAGS="$CPPFLAGS -DODIN_VERSION_RAW=\"$ODIN_VERSION\""
 CXXFLAGS="$CXXFLAGS -std=c++14"
 LDFLAGS="$LDFLAGS -pthread -lm -lstdc++"
 
-if [ -d ".git" ]; then
-	GIT_SHA=$(git rev-parse --short HEAD || :)
-	if [ "$GIT_SHA" ]; then
+if [ -d ".git" ] && [ $(which git) ]; then
+	versionTag=( $(git show --pretty='%cd %h' --date=format:%Y-%m --no-patch --no-notes HEAD) )
+	if [ $? -eq 0 ]; then
+		ODIN_VERSION="dev-${versionTag[0]}"
+		GIT_SHA="${versionTag[1]}"
 		CPPFLAGS="$CPPFLAGS -DGIT_SHA=\"$GIT_SHA\""
 	fi
 fi
 
+CPPFLAGS="$CPPFLAGS -DODIN_VERSION_RAW=\"$ODIN_VERSION\""
+
 DISABLED_WARNINGS="-Wno-switch -Wno-macro-redefined -Wno-unused-value"
 OS=$(uname)
 

+ 1 - 0
core/compress/zlib/zlib.odin

@@ -1,3 +1,4 @@
+//+vet !using-param
 package zlib
 
 /*

+ 17 - 20
core/encoding/entity/entity.odin

@@ -184,28 +184,26 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
 
 advance :: proc(t: ^Tokenizer) -> (err: Error) {
 	if t == nil { return .Tokenizer_Is_Nil }
-	using t
-
 	#no_bounds_check {
-		if read_offset < len(src) {
-			offset = read_offset
-			r, w   = rune(src[read_offset]), 1
+		if t.read_offset < len(t.src) {
+			t.offset = t.read_offset
+			t.r, t.w   = rune(t.src[t.read_offset]), 1
 			switch {
-			case r == 0:
+			case t.r == 0:
 				return .Illegal_NUL_Character
-			case r >= utf8.RUNE_SELF:
-				r, w = utf8.decode_rune_in_string(src[read_offset:])
-				if r == utf8.RUNE_ERROR && w == 1 {
+			case t.r >= utf8.RUNE_SELF:
+				t.r, t.w = utf8.decode_rune_in_string(t.src[t.read_offset:])
+				if t.r == utf8.RUNE_ERROR && t.w == 1 {
 					return .Illegal_UTF_Encoding
-				} else if r == utf8.RUNE_BOM && offset > 0 {
+				} else if t.r == utf8.RUNE_BOM && t.offset > 0 {
 					return .Illegal_BOM
 				}
 			}
-			read_offset += w
+			t.read_offset += t.w
 			return .None
 		} else {
-			offset = len(src)
-			r = -1
+			t.offset = len(t.src)
+			t.r = -1
 			return
 		}
 	}
@@ -273,26 +271,25 @@ _extract_xml_entity :: proc(t: ^Tokenizer) -> (entity: string, err: Error) {
 		All of these would be in the ASCII range.
 		Even if one is not, it doesn't matter. All characters we need to compare to extract are.
 	*/
-	using t
 
 	length := len(t.src)
 	found  := false
 
 	#no_bounds_check {
-		for read_offset < length {
-			if src[read_offset] == ';' {
+		for t.read_offset < length {
+			if t.src[t.read_offset] == ';' {
+				t.read_offset += 1
 				found = true
-				read_offset += 1
 				break
 			}
-			read_offset += 1
+			t.read_offset += 1
 		}
 	}
 
 	if found {
-		return string(src[offset + 1 : read_offset - 1]), .None
+		return string(t.src[t.offset + 1 : t.read_offset - 1]), .None
 	}
-	return string(src[offset : read_offset]), .Invalid_Entity_Encoding
+	return string(t.src[t.offset : t.read_offset]), .Invalid_Entity_Encoding
 }
 
 /*

+ 21 - 23
core/encoding/xml/debug_print.odin

@@ -19,43 +19,39 @@ import "core:fmt"
 */
 print :: proc(writer: io.Writer, doc: ^Document) -> (written: int, err: io.Error) {
 	if doc == nil { return }
-	using fmt
-
-	written += wprintf(writer, "[XML Prolog]\n")
+	written += fmt.wprintf(writer, "[XML Prolog]\n")
 
 	for attr in doc.prologue {
-		written += wprintf(writer, "\t%v: %v\n", attr.key, attr.val)
+		written += fmt.wprintf(writer, "\t%v: %v\n", attr.key, attr.val)
 	}
 
-	written += wprintf(writer, "[Encoding] %v\n", doc.encoding)
+	written += fmt.wprintf(writer, "[Encoding] %v\n", doc.encoding)
 
 	if len(doc.doctype.ident) > 0 {
-		written += wprintf(writer, "[DOCTYPE]  %v\n", doc.doctype.ident)
+		written += fmt.wprintf(writer, "[DOCTYPE]  %v\n", doc.doctype.ident)
 
 		if len(doc.doctype.rest) > 0 {
-		 	wprintf(writer, "\t%v\n", doc.doctype.rest)
+		 	fmt.wprintf(writer, "\t%v\n", doc.doctype.rest)
 		}
 	}
 
 	for comment in doc.comments {
-		written += wprintf(writer, "[Pre-root comment]  %v\n", comment)
+		written += fmt.wprintf(writer, "[Pre-root comment]  %v\n", comment)
 	}
 
 	if len(doc.elements) > 0 {
-	 	wprintln(writer, " --- ")
+	 	fmt.wprintln(writer, " --- ")
 	 	print_element(writer, doc, 0)
-	 	wprintln(writer, " --- ")
+	 	fmt.wprintln(writer, " --- ")
 	 }
 
 	return written, .None
 }
 
 print_element :: proc(writer: io.Writer, doc: ^Document, element_id: Element_ID, indent := 0) -> (written: int, err: io.Error) {
-	using fmt
-
 	tab :: proc(writer: io.Writer, indent: int) {
 		for _ in 0..=indent {
-			wprintf(writer, "\t")
+			fmt.wprintf(writer, "\t")
 		}
 	}
 
@@ -64,22 +60,24 @@ print_element :: proc(writer: io.Writer, doc: ^Document, element_id: Element_ID,
 	element := doc.elements[element_id]
 
 	if element.kind == .Element {
-		wprintf(writer, "<%v>\n", element.ident)
-		if len(element.value) > 0 {
-			tab(writer, indent + 1)
-			wprintf(writer, "[Value] %v\n", element.value)
+		fmt.wprintf(writer, "<%v>\n", element.ident)
+
+		for value in element.value {
+			switch v in value {
+			case string:
+				tab(writer, indent + 1)
+				fmt.wprintf(writer, "[Value] %v\n", v)
+			case Element_ID:
+				print_element(writer, doc, v, indent + 1)
+			}
 		}
 
 		for attr in element.attribs {
 			tab(writer, indent + 1)
-			wprintf(writer, "[Attr] %v: %v\n", attr.key, attr.val)
-		}
-
-		for child in element.children {
-			print_element(writer, doc, child, indent + 1)
+			fmt.wprintf(writer, "[Attr] %v: %v\n", attr.key, attr.val)
 		}
 	} else if element.kind == .Comment {
-		wprintf(writer, "[COMMENT] %v\n", element.value)
+		fmt.wprintf(writer, "[COMMENT] %v\n", element.value)
 	}
 
 	return written, .None

+ 3 - 3
core/encoding/xml/example/xml_example.odin

@@ -72,10 +72,10 @@ example :: proc() {
 	 	return
 	}
 
-	printf("Found `<charlist>` with %v children, %v elements total\n", len(docs[0].elements[charlist].children), docs[0].element_count)
+	printf("Found `<charlist>` with %v children, %v elements total\n", len(docs[0].elements[charlist].value), docs[0].element_count)
 
-	crc32 := doc_hash(docs[0])
-	printf("[%v] CRC32: 0x%08x\n", "🎉" if crc32 == 0xcaa042b9 else "🤬", crc32)
+	crc32 := doc_hash(docs[0], false)
+	printf("[%v] CRC32: 0x%08x\n", "🎉" if crc32 == 0x420dbac5 else "🤬", crc32)
 
 	for round in 0..<N {
 		defer xml.destroy(docs[round])

+ 17 - 12
core/encoding/xml/helpers.odin

@@ -13,20 +13,25 @@ find_child_by_ident :: proc(doc: ^Document, parent_id: Element_ID, ident: string
 	tag := doc.elements[parent_id]
 
 	count := 0
-	for child_id in tag.children {
-		child := doc.elements[child_id]
-		/*
-			Skip commments. They have no name.
-		*/
-		if child.kind  != .Element                { continue }
+	for v in tag.value {
+		switch child_id in v {
+		case string: continue
+		case Element_ID:
+			child := doc.elements[child_id]
+			/*
+				Skip commments. They have no name.
+			*/
+			if child.kind  != .Element                { continue }
 
-		/*
-			If the ident matches and it's the nth such child, return it.
-		*/
-		if child.ident == ident {
-			if count == nth                       { return child_id, true }
-			count += 1
+			/*
+				If the ident matches and it's the nth such child, return it.
+			*/
+			if child.ident == ident {
+				if count == nth                       { return child_id, true }
+				count += 1
+			}
 		}
+
 	}
 	return 0, false
 }

+ 16 - 16
core/encoding/xml/tokenizer.odin

@@ -125,38 +125,38 @@ error :: proc(t: ^Tokenizer, offset: int, msg: string, args: ..any) {
 }
 
 @(optimization_mode="speed")
-advance_rune :: proc(using t: ^Tokenizer) {
+advance_rune :: proc(t: ^Tokenizer) {
 	#no_bounds_check {
 		/*
 			Already bounds-checked here.
 		*/
-		if read_offset < len(src) {
-			offset = read_offset
-			if ch == '\n' {
-				line_offset = offset
-				line_count += 1
+		if t.read_offset < len(t.src) {
+			t.offset = t.read_offset
+			if t.ch == '\n' {
+				t.line_offset = t.offset
+				t.line_count += 1
 			}
-			r, w := rune(src[read_offset]), 1
+			r, w := rune(t.src[t.read_offset]), 1
 			switch {
 			case r == 0:
 				error(t, t.offset, "illegal character NUL")
 			case r >= utf8.RUNE_SELF:
-				r, w = #force_inline utf8.decode_rune_in_string(src[read_offset:])
+				r, w = #force_inline utf8.decode_rune_in_string(t.src[t.read_offset:])
 				if r == utf8.RUNE_ERROR && w == 1 {
 					error(t, t.offset, "illegal UTF-8 encoding")
-				} else if r == utf8.RUNE_BOM && offset > 0 {
+				} else if r == utf8.RUNE_BOM && t.offset > 0 {
 					error(t, t.offset, "illegal byte order mark")
 				}
 			}
-			read_offset += w
-			ch = r
+			t.read_offset += w
+			t.ch = r
 		} else {
-			offset = len(src)
-			if ch == '\n' {
-				line_offset = offset
-				line_count += 1
+			t.offset = len(t.src)
+			if t.ch == '\n' {
+				t.line_offset = t.offset
+				t.line_count += 1
 			}
-			ch = -1
+			t.ch = -1
 		}
 	}
 }
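
Note: many hunks in this merge follow the same pattern as the one above: `using t: ^Tokenizer`-style parameters are replaced by a plain named parameter with explicit field access, and files that still rely on `using` get a `//+vet !using-stmt` or `//+vet !using-param` directive instead. A minimal sketch of the before/after shape, using a hypothetical `Thing` type rather than anything from the commit:

	package using_param_sketch

	Thing :: struct { counter: int }

	// Before: `using` injects the parameter's fields into the procedure's scope.
	bump_old :: proc(using t: ^Thing) {
		counter += 1
	}

	// After: the parameter keeps its name and every field access is explicit.
	bump_new :: proc(t: ^Thing) {
		t.counter += 1
	}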

+ 15 - 27
core/encoding/xml/xml_reader.odin

@@ -125,16 +125,19 @@ Document :: struct {
 
 Element :: struct {
 	ident:   string,
-	value:   string,
+	value:   [dynamic]Value,
 	attribs: Attributes,
 
 	kind: enum {
 		Element = 0,
 		Comment,
 	},
-
 	parent:   Element_ID,
-	children: [dynamic]Element_ID,
+}
+
+Value :: union {
+	string,
+	Element_ID,
 }
 
 Attribute :: struct {
@@ -247,9 +250,6 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
 
 	err =            .Unexpected_Token
 	element, parent: Element_ID
-
-	tag_is_open   := false
-	first_element := true
 	open: Token
 
 	/*
@@ -275,16 +275,10 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
 					e.g. <odin - Start of new element.
 				*/
 				element = new_element(doc)
-				tag_is_open = true
-
-				if first_element {
-					/*
-						First element.
-					*/
-					parent   = element
-					first_element = false
+				if element == 0 { // First Element
+					parent = element
 				} else {
-					append(&doc.elements[parent].children, element)
+					append(&doc.elements[parent].value, element)
 				}
 
 				doc.elements[element].parent = parent
@@ -324,7 +318,6 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
 					expect(t, .Gt) or_return
 					parent      = doc.elements[element].parent
 					element     = parent
-					tag_is_open = false
 
 				case:
 					error(t, t.offset, "Expected close tag, got: %#v\n", end_token)
@@ -344,7 +337,6 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
 				}
 				parent      = doc.elements[element].parent
 				element     = parent
-				tag_is_open = false
 
 			} else if open.kind == .Exclaim {
 				/*
@@ -392,8 +384,8 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
 							el := new_element(doc)
 							doc.elements[el].parent = element
 							doc.elements[el].kind   = .Comment
-							doc.elements[el].value  = comment
-							append(&doc.elements[element].children, el)
+							append(&doc.elements[el].value, comment)
+							append(&doc.elements[element].value, el)
 						}
 					}
 
@@ -436,9 +428,6 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
 			/*
 				End of file.
 			*/
-			if tag_is_open {
-				return doc, .Premature_EOF
-			}
 			break loop
 
 		case:
@@ -450,7 +439,7 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
 			needs_processing |= .Decode_SGML_Entities in opts.flags
 
 			if !needs_processing {
-				doc.elements[element].value = body_text
+				append(&doc.elements[element].value, body_text)
 				continue
 			}
 
@@ -472,10 +461,10 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
 
 			decoded, decode_err := entity.decode_xml(body_text, decode_opts)
 			if decode_err == .None {
-				doc.elements[element].value = decoded
+				append(&doc.elements[element].value, decoded)
 				append(&doc.strings_to_free, decoded)
 			} else {
-				doc.elements[element].value = body_text
+				append(&doc.elements[element].value, body_text)
 			}
 		}
 	}
@@ -518,7 +507,7 @@ destroy :: proc(doc: ^Document) {
 
 	for el in doc.elements {
 		delete(el.attribs)
-		delete(el.children)
+		delete(el.value)
 	}
 	delete(doc.elements)
 
@@ -710,6 +699,5 @@ new_element :: proc(doc: ^Document) -> (id: Element_ID) {
 
 	cur := doc.element_count
 	doc.element_count += 1
-
 	return cur
 }
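
Note: with `Element.value` now a `[dynamic]Value` holding either text or a child `Element_ID` (replacing the separate `value` string and `children` array), callers walk an element the way the updated `helpers.odin` above does. A hedged sketch of that access pattern, assuming a parsed `doc: ^xml.Document` and an `id: xml.Element_ID`:

	package xml_value_sketch

	import "core:encoding/xml"
	import "core:fmt"

	print_children :: proc(doc: ^xml.Document, id: xml.Element_ID) {
		for value in doc.elements[id].value {
			switch v in value {
			case string:
				fmt.printf("text:  %v\n", v)                       // interleaved character data
			case xml.Element_ID:
				fmt.printf("child: <%v>\n", doc.elements[v].ident) // nested element
			}
		}
	}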

+ 11 - 11
core/fmt/fmt.odin

@@ -835,22 +835,22 @@ int_from_arg :: proc(args: []any, arg_index: int) -> (int, int, bool) {
 // - fi: A pointer to an Info structure
 // - verb: The invalid format verb
 //
-fmt_bad_verb :: proc(using fi: ^Info, verb: rune) {
+fmt_bad_verb :: proc(fi: ^Info, verb: rune) {
 	prev_in_bad := fi.in_bad
 	defer fi.in_bad = prev_in_bad
 	fi.in_bad = true
 
-	io.write_string(writer, "%!", &fi.n)
-	io.write_rune(writer, verb, &fi.n)
-	io.write_byte(writer, '(', &fi.n)
-	if arg.id != nil {
-		reflect.write_typeid(writer, arg.id, &fi.n)
-		io.write_byte(writer, '=', &fi.n)
-		fmt_value(fi, arg, 'v')
+	io.write_string(fi.writer, "%!", &fi.n)
+	io.write_rune(fi.writer, verb, &fi.n)
+	io.write_byte(fi.writer, '(', &fi.n)
+	if fi.arg.id != nil {
+		reflect.write_typeid(fi.writer, fi.arg.id, &fi.n)
+		io.write_byte(fi.writer, '=', &fi.n)
+		fmt_value(fi, fi.arg, 'v')
 	} else {
-		io.write_string(writer, "<nil>", &fi.n)
+		io.write_string(fi.writer, "<nil>", &fi.n)
 	}
-	io.write_byte(writer, ')', &fi.n)
+	io.write_byte(fi.writer, ')', &fi.n)
 }
 // Formats a boolean value according to the specified format verb
 //
@@ -859,7 +859,7 @@ fmt_bad_verb :: proc(using fi: ^Info, verb: rune) {
 // - b: The boolean value to format
 // - verb: The format verb
 //
-fmt_bool :: proc(using fi: ^Info, b: bool, verb: rune) {
+fmt_bool :: proc(fi: ^Info, b: bool, verb: rune) {
 	switch verb {
 	case 't', 'v':
 		fmt_string(fi, b ? "true" : "false", 's')

+ 8 - 7
core/image/netpbm/helpers.odin

@@ -4,13 +4,14 @@ import "core:bytes"
 import "core:image"
 
 destroy :: proc(img: ^image.Image) -> bool {
-	if img == nil do return false
+	if img == nil {
+		return false
+	}
 
 	defer free(img)
 	bytes.buffer_destroy(&img.pixels)
 
-	info, ok := img.metadata.(^image.Netpbm_Info)
-	if !ok do return false
+	info := img.metadata.(^image.Netpbm_Info) or_return
 
 	header_destroy(&info.header)
 	free(info)
@@ -19,9 +20,9 @@ destroy :: proc(img: ^image.Image) -> bool {
 	return true
 }
 
-header_destroy :: proc(using header: ^Header) {
-	if format == .P7 && tupltype != "" {
-		delete(tupltype)
-		tupltype = ""
+header_destroy :: proc(header: ^Header) {
+	if header.format == .P7 && header.tupltype != "" {
+		delete(header.tupltype)
+		header.tupltype = ""
 	}
 }
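
Note: the `or_return` on the type assertion above is an idiom this merge leans on: in a procedure whose final result is a `bool`, `x.(T) or_return` exits early with `false` when the assertion fails. A small hypothetical sketch:

	package or_return_sketch

	Number_Or_Text :: union { int, string }

	as_int :: proc(v: Number_Or_Text) -> (n: int, ok: bool) {
		n = v.(int) or_return // early-returns (0, false) when v does not hold an int
		return n, true
	}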

+ 1 - 0
core/image/netpbm/netpbm.odin

@@ -1,3 +1,4 @@
+//+vet !using-stmt
 package netpbm
 
 import "core:bytes"

+ 3 - 4
core/image/png/helpers.odin

@@ -80,11 +80,10 @@ time :: proc(c: image.PNG_Chunk) -> (res: tIME, ok: bool) {
 }
 
 core_time :: proc(c: image.PNG_Chunk) -> (t: coretime.Time, ok: bool) {
-	if png_time, png_ok := time(c); png_ok {
-		using png_time
+	if t, png_ok := time(c); png_ok {
 		return coretime.datetime_to_time(
-			int(year), int(month), int(day),
-			int(hour), int(minute), int(second),
+			int(t.year), int(t.month),  int(t.day),
+			int(t.hour), int(t.minute), int(t.second),
 		)
 	} else {
 		return {}, false

+ 8 - 8
core/image/png/png.odin

@@ -11,6 +11,7 @@
 // package png implements a PNG image reader
 //
 // The PNG specification is at https://www.w3.org/TR/PNG/.
+//+vet !using-stmt
 package png
 
 import "core:compress"
@@ -444,15 +445,14 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
 			img.width  = int(header.width)
 			img.height = int(header.height)
 
-			using header
 			h := image.PNG_IHDR{
-				width              = width,
-				height             = height,
-				bit_depth          = bit_depth,
-				color_type         = color_type,
-				compression_method = compression_method,
-				filter_method      = filter_method,
-				interlace_method   = interlace_method,
+				width              = header.width,
+				height             = header.height,
+				bit_depth          = header.bit_depth,
+				color_type         = header.color_type,
+				compression_method = header.compression_method,
+				filter_method      = header.filter_method,
+				interlace_method   = header.interlace_method,
 			}
 			info.header = h
 

+ 12 - 12
core/math/math.odin

@@ -2286,20 +2286,20 @@ F64_MASK  :: 0x7ff
 F64_SHIFT :: 64 - 12
 F64_BIAS  :: 0x3ff
 
-INF_F16     :f16: 0h7C00
-NEG_INF_F16 :f16: 0hFC00
+INF_F16     :: f16(0h7C00)
+NEG_INF_F16 :: f16(0hFC00)
 
-SNAN_F16    :f16: 0h7C01
-QNAN_F16    :f16: 0h7E01
+SNAN_F16    :: f16(0h7C01)
+QNAN_F16    :: f16(0h7E01)
 
-INF_F32     :f32: 0h7F80_0000
-NEG_INF_F32 :f32: 0hFF80_0000
+INF_F32     :: f32(0h7F80_0000)
+NEG_INF_F32 :: f32(0hFF80_0000)
 
-SNAN_F32    :f32: 0hFF80_0001
-QNAN_F32    :f32: 0hFFC0_0001
+SNAN_F32    :: f32(0hFF80_0001)
+QNAN_F32    :: f32(0hFFC0_0001)
 
-INF_F64     :f64: 0h7FF0_0000_0000_0000
-NEG_INF_F64 :f64: 0hFFF0_0000_0000_0000
+INF_F64     :: f64(0h7FF0_0000_0000_0000)
+NEG_INF_F64 :: f64(0hFFF0_0000_0000_0000)
 
-SNAN_F64    :f64: 0h7FF0_0000_0000_0001
-QNAN_F64    :f64: 0h7FF8_0000_0000_0001
+SNAN_F64    :: f64(0h7FF0_0000_0000_0001)
+QNAN_F64    :: f64(0h7FF8_0000_0000_0001)
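
Note: both spellings are valid Odin constant declarations; the hunk above only moves from the typed-constant form to an explicit cast of the hexadecimal float literal. Illustrative names, not from the commit:

	package const_sketch

	// Typed constant declaration (the old style above).
	ONE_HALF_A : f32 : 0h3F00_0000

	// Constant whose type comes from the explicit cast (the new style).
	ONE_HALF_B :: f32(0h3F00_0000)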

+ 52 - 52
core/mem/allocators.odin

@@ -111,11 +111,11 @@ begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
 	return tmp
 }
 
-end_arena_temp_memory :: proc(using tmp: Arena_Temp_Memory) {
-	assert(arena.offset >= prev_offset)
-	assert(arena.temp_count > 0)
-	arena.offset = prev_offset
-	arena.temp_count -= 1
+end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
+	assert(tmp.arena.offset >= tmp.prev_offset)
+	assert(tmp.arena.temp_count > 0)
+	tmp.arena.offset = tmp.prev_offset
+	tmp.arena.temp_count -= 1
 }
 
 
@@ -702,11 +702,11 @@ dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
 	pool.         used_blocks.allocator = array_allocator
 }
 
-dynamic_pool_destroy :: proc(using pool: ^Dynamic_Pool) {
+dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) {
 	dynamic_pool_free_all(pool)
-	delete(unused_blocks)
-	delete(used_blocks)
-	delete(out_band_allocations)
+	delete(pool.unused_blocks)
+	delete(pool.used_blocks)
+	delete(pool.out_band_allocations)
 
 	zero(pool, size_of(pool^))
 }
@@ -719,90 +719,90 @@ dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> (rawptr, Allocato
 }
 
 @(require_results)
-dynamic_pool_alloc_bytes :: proc(using pool: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
-	cycle_new_block :: proc(using pool: ^Dynamic_Pool) -> (err: Allocator_Error) {
-		if block_allocator.procedure == nil {
+dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
+	cycle_new_block :: proc(p: ^Dynamic_Pool) -> (err: Allocator_Error) {
+		if p.block_allocator.procedure == nil {
 			panic("You must call pool_init on a Pool before using it")
 		}
 
-		if current_block != nil {
-			append(&used_blocks, current_block)
+		if p.current_block != nil {
+			append(&p.used_blocks, p.current_block)
 		}
 
 		new_block: rawptr
-		if len(unused_blocks) > 0 {
-			new_block = pop(&unused_blocks)
+		if len(p.unused_blocks) > 0 {
+			new_block = pop(&p.unused_blocks)
 		} else {
 			data: []byte
-			data, err = block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
-			                                           block_size, alignment,
-			                                           nil, 0)
+			data, err = p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
+			                                        p.block_size, p.alignment,
+			                                        nil, 0)
 			new_block = raw_data(data)
 		}
 
-		bytes_left = block_size
-		current_pos = new_block
-		current_block = new_block
+		p.bytes_left    = p.block_size
+		p.current_pos   = new_block
+		p.current_block = new_block
 		return
 	}
 
 	n := bytes
-	extra := alignment - (n % alignment)
+	extra := p.alignment - (n % p.alignment)
 	n += extra
-	if n >= out_band_size {
-		assert(block_allocator.procedure != nil)
-		memory, err := block_allocator.procedure(block_allocator.data, Allocator_Mode.Alloc,
-			                                block_size, alignment,
-			                                nil, 0)
+	if n >= p.out_band_size {
+		assert(p.block_allocator.procedure != nil)
+		memory, err := p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
+		                                           p.block_size, p.alignment,
+		                                           nil, 0)
 		if memory != nil {
-			append(&out_band_allocations, raw_data(memory))
+			append(&p.out_band_allocations, raw_data(memory))
 		}
 		return memory, err
 	}
 
-	if bytes_left < n {
-		err := cycle_new_block(pool)
+	if p.bytes_left < n {
+		err := cycle_new_block(p)
 		if err != nil {
 			return nil, err
 		}
-		if current_block == nil {
+		if p.current_block == nil {
 			return nil, .Out_Of_Memory
 		}
 	}
 
-	memory := current_pos
-	current_pos = ptr_offset((^byte)(current_pos), n)
-	bytes_left -= n
-	return byte_slice(memory, bytes), nil
+	memory := p.current_pos
+	p.current_pos = ([^]byte)(p.current_pos)[n:]
+	p.bytes_left -= n
+	return ([^]byte)(memory)[:bytes], nil
 }
 
 
-dynamic_pool_reset :: proc(using pool: ^Dynamic_Pool) {
-	if current_block != nil {
-		append(&unused_blocks, current_block)
-		current_block = nil
+dynamic_pool_reset :: proc(p: ^Dynamic_Pool) {
+	if p.current_block != nil {
+		append(&p.unused_blocks, p.current_block)
+		p.current_block = nil
 	}
 
-	for block in used_blocks {
-		append(&unused_blocks, block)
+	for block in p.used_blocks {
+		append(&p.unused_blocks, block)
 	}
-	clear(&used_blocks)
+	clear(&p.used_blocks)
 
-	for a in out_band_allocations {
-		free(a, block_allocator)
+	for a in p.out_band_allocations {
+		free(a, p.block_allocator)
 	}
-	clear(&out_band_allocations)
+	clear(&p.out_band_allocations)
 
-	bytes_left = 0 // Make new allocations call `cycle_new_block` again.
+	p.bytes_left = 0 // Make new allocations call `cycle_new_block` again.
 }
 
-dynamic_pool_free_all :: proc(using pool: ^Dynamic_Pool) {
-	dynamic_pool_reset(pool)
+dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) {
+	dynamic_pool_reset(p)
 
-	for block in unused_blocks {
-		free(block, block_allocator)
+	for block in p.unused_blocks {
+		free(block, p.block_allocator)
 	}
-	clear(&unused_blocks)
+	clear(&p.unused_blocks)
 }
 
 

+ 35 - 35
core/net/url.odin

@@ -63,100 +63,100 @@ split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host,
 }
 
 join_url :: proc(scheme, host, path: string, queries: map[string]string, allocator := context.allocator) -> string {
-	using strings
+	b := strings.builder_make(allocator)
+	strings.builder_grow(&b, len(scheme) + 3 + len(host) + 1 + len(path))
 
-	b := builder_make(allocator)
-	builder_grow(&b, len(scheme) + 3 + len(host) + 1 + len(path))
-
-	write_string(&b, scheme)
-	write_string(&b, "://")
-	write_string(&b, trim_space(host))
+	strings.write_string(&b, scheme)
+	strings.write_string(&b, "://")
+	strings.write_string(&b, strings.trim_space(host))
 
 	if path != "" {
-		if path[0] != '/' do write_string(&b, "/")
-		write_string(&b, trim_space(path))
+		if path[0] != '/' {
+			strings.write_string(&b, "/")
+		}
+		strings.write_string(&b, strings.trim_space(path))
 	}
 
 
 	query_length := len(queries)
-	if query_length > 0 do write_string(&b, "?")
+	if query_length > 0 {
+		strings.write_string(&b, "?")
+	}
 	i := 0
 	for query_name, query_value in queries {
-		write_string(&b, query_name)
+		strings.write_string(&b, query_name)
 		if query_value != "" {
-			write_string(&b, "=")
-			write_string(&b, query_value)
+			strings.write_string(&b, "=")
+			strings.write_string(&b, query_value)
 		}
 		if i < query_length - 1 {
-			write_string(&b, "&")
+			strings.write_string(&b, "&")
 		}
 		i += 1
 	}
 
-	return to_string(b)
+	return strings.to_string(b)
 }
 
 percent_encode :: proc(s: string, allocator := context.allocator) -> string {
-	using strings
-
-	b := builder_make(allocator)
-	builder_grow(&b, len(s) + 16) // NOTE(tetra): A reasonable number to allow for the number of things we need to escape.
+	b := strings.builder_make(allocator)
+	strings.builder_grow(&b, len(s) + 16) // NOTE(tetra): A reasonable number to allow for the number of things we need to escape.
 
 	for ch in s {
 		switch ch {
 		case 'A'..='Z', 'a'..='z', '0'..='9', '-', '_', '.', '~':
-			write_rune(&b, ch)
+			strings.write_rune(&b, ch)
 		case:
 			bytes, n := utf8.encode_rune(ch)
 			for byte in bytes[:n] {
 				buf: [2]u8 = ---
 				t := strconv.append_int(buf[:], i64(byte), 16)
-				write_rune(&b, '%')
-				write_string(&b, t)
+				strings.write_rune(&b, '%')
+				strings.write_string(&b, t)
 			}
 		}
 	}
 
-	return to_string(b)
+	return strings.to_string(b)
 }
 
 percent_decode :: proc(encoded_string: string, allocator := context.allocator) -> (decoded_string: string, ok: bool) {
-	using strings
-
-	b := builder_make(allocator)
-	builder_grow(&b, len(encoded_string))
-	defer if !ok do builder_destroy(&b)
+	b := strings.builder_make(allocator)
+	strings.builder_grow(&b, len(encoded_string))
+	defer if !ok do strings.builder_destroy(&b)
 
 	s := encoded_string
 
 	for len(s) > 0 {
-		i := index_byte(s, '%')
+		i := strings.index_byte(s, '%')
 		if i == -1 {
-			write_string(&b, s) // no '%'s; the string is already decoded
+			strings.write_string(&b, s) // no '%'s; the string is already decoded
 			break
 		}
 
-		write_string(&b, s[:i])
+		strings.write_string(&b, s[:i])
 		s = s[i:]
 
 		if len(s) == 0 do return // percent without anything after it
 		s = s[1:]
 
 		if s[0] == '%' {
-			write_byte(&b, '%')
+			strings.write_byte(&b, '%')
 			s = s[1:]
 			continue
 		}
 
-		if len(s) < 2 do return // percent without encoded value
+		if len(s) < 2 {
+			return // percent without encoded value
+		}
 
 		val := hex.decode_sequence(s[:2]) or_return
-		write_byte(&b, val)
+		strings.write_byte(&b, val)
 		s = s[2:]
 	}
 
 	ok = true
-	decoded_string = to_string(b)
+	decoded_string = strings.to_string(b)
 	return
 }
 

+ 86 - 92
core/odin/printer/visit.odin

@@ -336,22 +336,20 @@ hint_current_line :: proc(p: ^Printer, hint: Line_Type) {
 
 @(private)
 visit_decl :: proc(p: ^Printer, decl: ^ast.Decl, called_in_stmt := false) {
-	using ast
-
 	if decl == nil {
 		return
 	}
 
 	#partial switch v in decl.derived_stmt {
-	case ^Expr_Stmt:
+	case ^ast.Expr_Stmt:
 		move_line(p, decl.pos)
 		visit_expr(p, v.expr)
 		if p.config.semicolons {
 			push_generic_token(p, .Semicolon, 0)
 		}
-	case ^When_Stmt:
-		visit_stmt(p, cast(^Stmt)decl)
-	case ^Foreign_Import_Decl:
+	case ^ast.When_Stmt:
+		visit_stmt(p, cast(^ast.Stmt)decl)
+	case ^ast.Foreign_Import_Decl:
 		if len(v.attributes) > 0 {
 			sort.sort(sort_attribute(&v.attributes))
 			move_line(p, v.attributes[0].pos)
@@ -370,7 +368,7 @@ visit_decl :: proc(p: ^Printer, decl: ^ast.Decl, called_in_stmt := false) {
 		for path in v.fullpaths {
 			push_ident_token(p, path, 0)
 		}
-	case ^Foreign_Block_Decl:
+	case ^ast.Foreign_Block_Decl:
 		if len(v.attributes) > 0 {
 			sort.sort(sort_attribute(&v.attributes))
 			move_line(p, v.attributes[0].pos)
@@ -383,7 +381,7 @@ visit_decl :: proc(p: ^Printer, decl: ^ast.Decl, called_in_stmt := false) {
 
 		visit_expr(p, v.foreign_library)
 		visit_stmt(p, v.body)
-	case ^Import_Decl:
+	case ^ast.Import_Decl:
 		move_line(p, decl.pos)
 
 		if v.name.text != "" {
@@ -395,7 +393,7 @@ visit_decl :: proc(p: ^Printer, decl: ^ast.Decl, called_in_stmt := false) {
 			push_ident_token(p, v.fullpath, 1)
 		}
 
-	case ^Value_Decl:
+	case ^ast.Value_Decl:
 		if len(v.attributes) > 0 {
 			sort.sort(sort_attribute(&v.attributes))
 			move_line(p, v.attributes[0].pos)
@@ -447,9 +445,9 @@ visit_decl :: proc(p: ^Printer, decl: ^ast.Decl, called_in_stmt := false) {
 
 		for value in v.values {
 			#partial switch a in value.derived {
-			case ^Union_Type, ^Enum_Type, ^Struct_Type:
+			case ^ast.Union_Type, ^ast.Enum_Type, ^ast.Struct_Type:
 				add_semicolon = false || called_in_stmt
-			case ^Proc_Lit:
+			case ^ast.Proc_Lit:
 				add_semicolon = false
 			}
 		}
@@ -510,40 +508,38 @@ visit_attributes :: proc(p: ^Printer, attributes: [dynamic]^ast.Attribute) {
 
 @(private)
 visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Generic, empty_block := false, block_stmt := false) {
-	using ast
-
 	if stmt == nil {
 		return
 	}
 
 
 	switch v in stmt.derived_stmt {
-	case ^Bad_Stmt:
-	case ^Bad_Decl:
-	case ^Package_Decl:
+	case ^ast.Bad_Stmt:
+	case ^ast.Bad_Decl:
+	case ^ast.Package_Decl:
 
-	case ^Empty_Stmt:
+	case ^ast.Empty_Stmt:
 		push_generic_token(p, .Semicolon, 0)
-	case ^Tag_Stmt:
+	case ^ast.Tag_Stmt:
 		push_generic_token(p, .Hash, 1)
 		push_generic_token(p, v.op.kind, 1, v.op.text)
 		visit_stmt(p, v.stmt)
 
 
-	case ^Import_Decl:
-		visit_decl(p, cast(^Decl)stmt, true)
+	case ^ast.Import_Decl:
+		visit_decl(p, cast(^ast.Decl)stmt, true)
 		return
-	case ^Value_Decl:
-		visit_decl(p, cast(^Decl)stmt, true)
+	case ^ast.Value_Decl:
+		visit_decl(p, cast(^ast.Decl)stmt, true)
 		return
-	case ^Foreign_Import_Decl:
-		visit_decl(p, cast(^Decl)stmt, true)
+	case ^ast.Foreign_Import_Decl:
+		visit_decl(p, cast(^ast.Decl)stmt, true)
 		return
-	case ^Foreign_Block_Decl:
-		visit_decl(p, cast(^Decl)stmt, true)
+	case ^ast.Foreign_Block_Decl:
+		visit_decl(p, cast(^ast.Decl)stmt, true)
 		return
 
-	case ^Using_Stmt:
+	case ^ast.Using_Stmt:
 		move_line(p, v.pos)
 
 		push_generic_token(p, .Using, 1)
@@ -553,7 +549,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 		if p.config.semicolons {
 			push_generic_token(p, .Semicolon, 0)
 		}
-	case ^Block_Stmt:
+	case ^ast.Block_Stmt:
 		move_line(p, v.pos)
 
 		if v.pos.line == v.end.line {
@@ -583,7 +579,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 				visit_end_brace(p, v.end)
 			}
 		}
-	case ^If_Stmt:
+	case ^ast.If_Stmt:
 		move_line(p, v.pos)
 
 		if v.label != nil {
@@ -606,7 +602,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 
 		uses_do := false
 
-		if check_stmt, ok := v.body.derived.(^Block_Stmt); ok && check_stmt.uses_do {
+		if check_stmt, ok := v.body.derived.(^ast.Block_Stmt); ok && check_stmt.uses_do {
 			uses_do = true
 		}
 
@@ -637,7 +633,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 
 			visit_stmt(p, v.else_stmt)
 		}
-	case ^Switch_Stmt:
+	case ^ast.Switch_Stmt:
 		move_line(p, v.pos)
 
 		if v.label != nil {
@@ -665,7 +661,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 
 		visit_expr(p, v.cond)
 		visit_stmt(p, v.body)
-	case ^Case_Clause:
+	case ^ast.Case_Clause:
 		move_line(p, v.pos)
 
 		if !p.config.indent_cases {
@@ -689,7 +685,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 		if !p.config.indent_cases {
 			indent(p)
 		}
-	case ^Type_Switch_Stmt:
+	case ^ast.Type_Switch_Stmt:
 		move_line(p, v.pos)
 
 		hint_current_line(p, {.Switch_Stmt})
@@ -707,7 +703,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 
 		visit_stmt(p, v.tag)
 		visit_stmt(p, v.body)
-	case ^Assign_Stmt:
+	case ^ast.Assign_Stmt:
 		move_line(p, v.pos)
 
 		hint_current_line(p, {.Assign})
@@ -721,13 +717,13 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 		if block_stmt && p.config.semicolons {
 			push_generic_token(p, .Semicolon, 0)
 		}
-	case ^Expr_Stmt:
+	case ^ast.Expr_Stmt:
 		move_line(p, v.pos)
 		visit_expr(p, v.expr)
 		if block_stmt && p.config.semicolons {
 			push_generic_token(p, .Semicolon, 0)
 		}
-	case ^For_Stmt:
+	case ^ast.For_Stmt:
 		// this should be simplified
 		move_line(p, v.pos)
 
@@ -764,7 +760,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 
 		visit_stmt(p, v.body)
 
-	case ^Inline_Range_Stmt:
+	case ^ast.Inline_Range_Stmt:
 		move_line(p, v.pos)
 
 		if v.label != nil {
@@ -790,7 +786,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 		visit_expr(p, v.expr)
 		visit_stmt(p, v.body)
 
-	case ^Range_Stmt:
+	case ^ast.Range_Stmt:
 		move_line(p, v.pos)
 
 		if v.label != nil {
@@ -816,7 +812,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 		visit_expr(p, v.expr)
 
 		visit_stmt(p, v.body)
-	case ^Return_Stmt:
+	case ^ast.Return_Stmt:
 		move_line(p, v.pos)
 
 		push_generic_token(p, .Return, 1)
@@ -828,7 +824,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 		if block_stmt && p.config.semicolons {
 			push_generic_token(p, .Semicolon, 0)
 		}
-	case ^Defer_Stmt:
+	case ^ast.Defer_Stmt:
 		move_line(p, v.pos)
 		push_generic_token(p, .Defer, 0)
 
@@ -837,7 +833,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 		if p.config.semicolons {
 			push_generic_token(p, .Semicolon, 0)
 		}
-	case ^When_Stmt:
+	case ^ast.When_Stmt:
 		move_line(p, v.pos)
 		push_generic_token(p, .When, 1)
 		visit_expr(p, v.cond)
@@ -857,7 +853,7 @@ visit_stmt :: proc(p: ^Printer, stmt: ^ast.Stmt, block_type: Block_Type = .Gener
 			visit_stmt(p, v.else_stmt)
 		}
 
-	case ^Branch_Stmt:
+	case ^ast.Branch_Stmt:
 		move_line(p, v.pos)
 
 		push_generic_token(p, v.tok.kind, 0)
@@ -921,8 +917,6 @@ push_poly_params :: proc(p: ^Printer, poly_params: ^ast.Field_List) {
 
 @(private)
 visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
-	using ast
-
 	if expr == nil {
 		return
 	}
@@ -930,14 +924,14 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 	set_source_position(p, expr.pos)
 
 	switch v in expr.derived_expr {
-	case ^Bad_Expr:
+	case ^ast.Bad_Expr:
 
-	case ^Tag_Expr:
+	case ^ast.Tag_Expr:
 		push_generic_token(p, .Hash, 1)
 		push_generic_token(p, v.op.kind, 1, v.op.text)
 		visit_expr(p, v.expr)
 
-	case ^Inline_Asm_Expr:
+	case ^ast.Inline_Asm_Expr:
 		push_generic_token(p, v.tok.kind, 1, v.tok.text)
 
 		push_generic_token(p, .Open_Paren, 1)
@@ -954,42 +948,42 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 		push_generic_token(p, .Comma, 0)
 		visit_expr(p, v.constraints_string)
 		push_generic_token(p, .Close_Brace, 0)
-	case ^Undef:
+	case ^ast.Undef:
 		push_generic_token(p, .Undef, 1)
-	case ^Auto_Cast:
+	case ^ast.Auto_Cast:
 		push_generic_token(p, v.op.kind, 1)
 		visit_expr(p, v.expr)
-	case ^Ternary_If_Expr:
+	case ^ast.Ternary_If_Expr:
 		visit_expr(p, v.x)
 		push_generic_token(p, v.op1.kind, 1)
 		visit_expr(p, v.cond)
 		push_generic_token(p, v.op2.kind, 1)
 		visit_expr(p, v.y)
-	case ^Ternary_When_Expr:
+	case ^ast.Ternary_When_Expr:
 		visit_expr(p, v.x)
 		push_generic_token(p, v.op1.kind, 1)
 		visit_expr(p, v.cond)
 		push_generic_token(p, v.op2.kind, 1)
 		visit_expr(p, v.y)
-	case ^Or_Else_Expr:
+	case ^ast.Or_Else_Expr:
 		visit_expr(p, v.x)
 		push_generic_token(p, v.token.kind, 1)
 		visit_expr(p, v.y)
-	case ^Or_Return_Expr:
+	case ^ast.Or_Return_Expr:
 		visit_expr(p, v.expr)
 		push_generic_token(p, v.token.kind, 1)
-	case ^Selector_Call_Expr:
+	case ^ast.Selector_Call_Expr:
 		visit_expr(p, v.call.expr)
 		push_generic_token(p, .Open_Paren, 1)
 		visit_exprs(p, v.call.args, {.Add_Comma})
 		push_generic_token(p, .Close_Paren, 0)
-	case ^Ellipsis:
+	case ^ast.Ellipsis:
 		push_generic_token(p, .Ellipsis, 1)
 		visit_expr(p, v.expr)
-	case ^Relative_Type:
+	case ^ast.Relative_Type:
 		visit_expr(p, v.tag)
 		visit_expr(p, v.type)
-	case ^Slice_Expr:
+	case ^ast.Slice_Expr:
 		visit_expr(p, v.expr)
 		push_generic_token(p, .Open_Bracket, 0)
 		visit_expr(p, v.low)
@@ -999,37 +993,37 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 			visit_expr(p, v.high)
 		}
 		push_generic_token(p, .Close_Bracket, 0)
-	case ^Ident:
+	case ^ast.Ident:
 		if .Enforce_Poly_Names in options {
 			push_generic_token(p, .Dollar, 1)
 			push_ident_token(p, v.name, 0)
 		} else {
 			push_ident_token(p, v.name, 1)
 		}
-	case ^Deref_Expr:
+	case ^ast.Deref_Expr:
 		visit_expr(p, v.expr)
 		push_generic_token(p, v.op.kind, 0)
-	case ^Type_Cast:
+	case ^ast.Type_Cast:
 		push_generic_token(p, v.tok.kind, 1)
 		push_generic_token(p, .Open_Paren, 0)
 		visit_expr(p, v.type)
 		push_generic_token(p, .Close_Paren, 0)
 		merge_next_token(p)
 		visit_expr(p, v.expr)
-	case ^Basic_Directive:
+	case ^ast.Basic_Directive:
 		push_generic_token(p, v.tok.kind, 1)
 		push_ident_token(p, v.name, 0)
-	case ^Distinct_Type:
+	case ^ast.Distinct_Type:
 		push_generic_token(p, .Distinct, 1)
 		visit_expr(p, v.type)
-	case ^Dynamic_Array_Type:
+	case ^ast.Dynamic_Array_Type:
 		visit_expr(p, v.tag)
 		push_generic_token(p, .Open_Bracket, 1)
 		push_generic_token(p, .Dynamic, 0)
 		push_generic_token(p, .Close_Bracket, 0)
 		merge_next_token(p)
 		visit_expr(p, v.elem)
-	case ^Bit_Set_Type:
+	case ^ast.Bit_Set_Type:
 		push_generic_token(p, .Bit_Set, 1)
 		push_generic_token(p, .Open_Bracket, 0)
 
@@ -1041,7 +1035,7 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 		}
 
 		push_generic_token(p, .Close_Bracket, 0)
-	case ^Union_Type:
+	case ^ast.Union_Type:
 		push_generic_token(p, .Union, 1)
 
 		push_poly_params(p, v.poly_params)
@@ -1066,7 +1060,7 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 			visit_exprs(p, v.variants, {.Add_Comma, .Trailing})
 			visit_end_brace(p, v.end)
 		}
-	case ^Enum_Type:
+	case ^ast.Enum_Type:
 		push_generic_token(p, .Enum, 1)
 
 		hint_current_line(p, {.Enum})
@@ -1089,7 +1083,7 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 		}
 
 		set_source_position(p, v.end)
-	case ^Struct_Type:
+	case ^ast.Struct_Type:
 		push_generic_token(p, .Struct, 1)
 
 		hint_current_line(p, {.Struct})
@@ -1124,7 +1118,7 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 		}
 
 		set_source_position(p, v.end)
-	case ^Proc_Lit:
+	case ^ast.Proc_Lit:
 		switch v.inlining {
 		case .None:
 		case .Inline:
@@ -1143,16 +1137,16 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 		} else {
 			push_generic_token(p, .Undef, 1)
 		}
-	case ^Proc_Type:
+	case ^ast.Proc_Type:
 		visit_proc_type(p, v)
-	case ^Basic_Lit:
+	case ^ast.Basic_Lit:
 		push_generic_token(p, v.tok.kind, 1, v.tok.text)
-	case ^Binary_Expr:
+	case ^ast.Binary_Expr:
 		visit_binary_expr(p, v)
-	case ^Implicit_Selector_Expr:
+	case ^ast.Implicit_Selector_Expr:
 		push_generic_token(p, .Period, 1)
 		push_ident_token(p, v.field.name, 0)
-	case ^Call_Expr:
+	case ^ast.Call_Expr:
 		visit_expr(p, v.expr)
 
 		push_format_token(p,
@@ -1167,34 +1161,34 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 
 		visit_call_exprs(p, v.args, v.ellipsis.kind == .Ellipsis)
 		push_generic_token(p, .Close_Paren, 0)
-	case ^Typeid_Type:
+	case ^ast.Typeid_Type:
 		push_generic_token(p, .Typeid, 1)
 
 		if v.specialization != nil {
 			push_generic_token(p, .Quo, 0)
 			visit_expr(p, v.specialization)
 		}
-	case ^Selector_Expr:
+	case ^ast.Selector_Expr:
 		visit_expr(p, v.expr)
 		push_generic_token(p, v.op.kind, 0)
 		visit_expr(p, v.field)
-	case ^Paren_Expr:
+	case ^ast.Paren_Expr:
 		push_generic_token(p, .Open_Paren, 1)
 		visit_expr(p, v.expr)
 		push_generic_token(p, .Close_Paren, 0)
-	case ^Index_Expr:
+	case ^ast.Index_Expr:
 		visit_expr(p, v.expr)
 		push_generic_token(p, .Open_Bracket, 0)
 		visit_expr(p, v.index)
 		push_generic_token(p, .Close_Bracket, 0)
-	case ^Matrix_Index_Expr:
+	case ^ast.Matrix_Index_Expr:
 		visit_expr(p, v.expr)
 		push_generic_token(p, .Open_Bracket, 0)
 		visit_expr(p, v.row_index)
 		push_generic_token(p, .Comma, 0)
 		visit_expr(p, v.column_index)
 		push_generic_token(p, .Close_Bracket, 0)
-	case ^Proc_Group:
+	case ^ast.Proc_Group:
 		push_generic_token(p, v.tok.kind, 1)
 
 		if len(v.args) != 0 && v.pos.line != v.args[len(v.args) - 1].pos.line {
@@ -1209,7 +1203,7 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 			push_generic_token(p, .Close_Brace, 0)
 		}
 
-	case ^Comp_Lit:
+	case ^ast.Comp_Lit:
 		if v.type != nil {
 			visit_expr(p, v.type)
 		}
@@ -1226,18 +1220,18 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 			push_generic_token(p, .Close_Brace, 0)
 		}
 
-	case ^Unary_Expr:
+	case ^ast.Unary_Expr:
 		push_generic_token(p, v.op.kind, 1)
 		merge_next_token(p)
 		visit_expr(p, v.expr)
-	case ^Field_Value:
+	case ^ast.Field_Value:
 		visit_expr(p, v.field)
 		push_generic_token(p, .Eq, 1)
 		visit_expr(p, v.value)
-	case ^Type_Assertion:
+	case ^ast.Type_Assertion:
 		visit_expr(p, v.expr)
 
-		if unary, ok := v.type.derived.(^Unary_Expr); ok && unary.op.text == "?" {
+		if unary, ok := v.type.derived.(^ast.Unary_Expr); ok && unary.op.text == "?" {
 			push_generic_token(p, .Period, 0)
 			visit_expr(p, v.type)
 		} else {
@@ -1247,13 +1241,13 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 			push_generic_token(p, .Close_Paren, 0)
 		}
 
-	case ^Pointer_Type:
+	case ^ast.Pointer_Type:
 		push_generic_token(p, .Pointer, 1)
 		merge_next_token(p)
 		visit_expr(p, v.elem)
-	case ^Implicit:
+	case ^ast.Implicit:
 		push_generic_token(p, v.tok.kind, 1)
-	case ^Poly_Type:
+	case ^ast.Poly_Type:
 		push_generic_token(p, .Dollar, 1)
 		merge_next_token(p)
 		visit_expr(p, v.type)
@@ -1263,28 +1257,28 @@ visit_expr :: proc(p: ^Printer, expr: ^ast.Expr, options := List_Options{}) {
 			merge_next_token(p)
 			visit_expr(p, v.specialization)
 		}
-	case ^Array_Type:
+	case ^ast.Array_Type:
 		visit_expr(p, v.tag)
 		push_generic_token(p, .Open_Bracket, 1)
 		visit_expr(p, v.len)
 		push_generic_token(p, .Close_Bracket, 0)
 		merge_next_token(p)
 		visit_expr(p, v.elem)
-	case ^Map_Type:
+	case ^ast.Map_Type:
 		push_generic_token(p, .Map, 1)
 		push_generic_token(p, .Open_Bracket, 0)
 		visit_expr(p, v.key)
 		push_generic_token(p, .Close_Bracket, 0)
 		merge_next_token(p)
 		visit_expr(p, v.value)
-	case ^Helper_Type:
+	case ^ast.Helper_Type:
 		visit_expr(p, v.type)
-	case ^Multi_Pointer_Type:
+	case ^ast.Multi_Pointer_Type:
 		push_generic_token(p, .Open_Bracket, 1)
 		push_generic_token(p, .Pointer, 0)
 		push_generic_token(p, .Close_Bracket, 0)
 		visit_expr(p, v.elem)
-	case ^Matrix_Type:
+	case ^ast.Matrix_Type:
 		push_generic_token(p, .Matrix, 1)
 		push_generic_token(p, .Open_Bracket, 0)
 		visit_expr(p, v.row_count)

+ 16 - 16
core/odin/tokenizer/tokenizer.odin

@@ -75,34 +75,34 @@ error :: proc(t: ^Tokenizer, offset: int, msg: string, args: ..any) {
 	t.error_count += 1
 }
 
-advance_rune :: proc(using t: ^Tokenizer) {
-	if read_offset < len(src) {
-		offset = read_offset
-		if ch == '\n' {
-			line_offset = offset
-			line_count += 1
+advance_rune :: proc(t: ^Tokenizer) {
+	if t.read_offset < len(t.src) {
+		t.offset = t.read_offset
+		if t.ch == '\n' {
+			t.line_offset = t.offset
+			t.line_count += 1
 		}
-		r, w := rune(src[read_offset]), 1
+		r, w := rune(t.src[t.read_offset]), 1
 		switch {
 		case r == 0:
 			error(t, t.offset, "illegal character NUL")
 		case r >= utf8.RUNE_SELF:
-			r, w = utf8.decode_rune_in_string(src[read_offset:])
+			r, w = utf8.decode_rune_in_string(t.src[t.read_offset:])
 			if r == utf8.RUNE_ERROR && w == 1 {
 				error(t, t.offset, "illegal UTF-8 encoding")
-			} else if r == utf8.RUNE_BOM && offset > 0 {
+			} else if r == utf8.RUNE_BOM && t.offset > 0 {
 				error(t, t.offset, "illegal byte order mark")
 			}
 		}
-		read_offset += w
-		ch = r
+		t.read_offset += w
+		t.ch = r
 	} else {
-		offset = len(src)
-		if ch == '\n' {
-			line_offset = offset
-			line_count += 1
+		t.offset = len(t.src)
+		if t.ch == '\n' {
+			t.line_offset = t.offset
+			t.line_count += 1
 		}
-		ch = -1
+		t.ch = -1
 	}
 }
 

+ 118 - 98
core/runtime/dynamic_map_internal.odin

@@ -414,68 +414,21 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
 	tk := map_cell_index_dynamic(sk, info.ks, 1)
 	tv := map_cell_index_dynamic(sv, info.vs, 1)
 
-	for {
-		hp := &hs[pos]
-		element_hash := hp^
+	swap_loop: for {
+		element_hash := hs[pos]
 
 		if map_hash_is_empty(element_hash) {
-			kp := map_cell_index_dynamic(ks, info.ks, pos)
-			vp := map_cell_index_dynamic(vs, info.vs, pos)
-			intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(k), size_of_k)
-			intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(v), size_of_v)
-			hp^ = h
+			k_dst := map_cell_index_dynamic(ks, info.ks, pos)
+			v_dst := map_cell_index_dynamic(vs, info.vs, pos)
+			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+			hs[pos] = h
 
-			return result if result != 0 else vp
+			return result if result != 0 else v_dst
 		}
 
 		if map_hash_is_deleted(element_hash) {
-			next_pos := (pos + 1) & mask
-
-			// backward shift
-			for !map_hash_is_empty(hs[next_pos]) {
-				probe_distance := map_probe_distance(m^, hs[next_pos], next_pos)
-				if probe_distance == 0 {
-					break
-				}
-				probe_distance -= 1
-
-				kp := map_cell_index_dynamic(ks, info.ks, pos)
-				vp := map_cell_index_dynamic(vs, info.vs, pos)
-				kn := map_cell_index_dynamic(ks, info.ks, next_pos)
-				vn := map_cell_index_dynamic(vs, info.vs, next_pos)
-
-				if distance > probe_distance {
-					if result == 0 {
-						result = vp
-					}
-					// move stored into pos; store next
-					intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(k), size_of_k)
-					intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(v), size_of_v)
-					hs[pos] = h
-
-					intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(kn), size_of_k)
-					intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(vn), size_of_v)
-					h = hs[next_pos]
-				} else {
-					// move next back 1
-					intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(kn), size_of_k)
-					intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(vn), size_of_v)
-					hs[pos] = hs[next_pos]
-					distance = probe_distance
-				}
-				hs[next_pos] = 0
-				pos = (pos + 1) & mask
-				next_pos = (next_pos + 1) & mask
-				distance += 1
-			}
-
-			kp := map_cell_index_dynamic(ks, info.ks, pos)
-			vp := map_cell_index_dynamic(vs, info.vs, pos)
-			intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(k), size_of_k)
-			intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(v), size_of_v)
-			hs[pos] = h
-
-			return result if result != 0 else vp
+			break swap_loop
 		}
 
 		if probe_distance := map_probe_distance(m^, element_hash, pos); distance > probe_distance {
@@ -495,8 +448,8 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
 			intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(tv), size_of_v)
 
 			th := h
-			h = hp^
-			hp^ = th
+			h = hs[pos]
+			hs[pos] = th
 
 			distance = probe_distance
 		}
@@ -504,6 +457,103 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
 		pos = (pos + 1) & mask
 		distance += 1
 	}
+
+	// backward shift loop
+	hs[pos] = 0
+	look_ahead: uintptr = 1
+	for {
+		la_pos := (pos + look_ahead) & mask
+		element_hash := hs[la_pos]
+
+		if map_hash_is_deleted(element_hash) {
+			look_ahead += 1
+			hs[la_pos] = 0
+			continue
+		}
+
+		k_dst := map_cell_index_dynamic(ks, info.ks, pos)
+		v_dst := map_cell_index_dynamic(vs, info.vs, pos)
+
+		if map_hash_is_empty(element_hash) {
+			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+			hs[pos] = h
+
+			return result if result != 0 else v_dst
+		}
+
+		k_src := map_cell_index_dynamic(ks, info.ks, la_pos)
+		v_src := map_cell_index_dynamic(vs, info.vs, la_pos)
+		probe_distance := map_probe_distance(m^, element_hash, la_pos)
+
+		if probe_distance < look_ahead {
+			// probed can be made ideal while placing saved (ending condition)
+			if result == 0 {
+				result = v_dst
+			}
+			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+			hs[pos] = h
+
+			// This will be an ideal move
+			pos = (la_pos - probe_distance) & mask
+			look_ahead -= probe_distance
+
+			// shift until we hit ideal/empty
+			for probe_distance != 0 {
+				k_dst = map_cell_index_dynamic(ks, info.ks, pos)
+				v_dst = map_cell_index_dynamic(vs, info.vs, pos)
+
+				intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k_src), size_of_k)
+				intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v_src), size_of_v)
+				hs[pos] = element_hash
+				hs[la_pos] = 0
+
+				pos = (pos + 1) & mask
+				la_pos = (la_pos + 1) & mask
+				look_ahead = (la_pos - pos) & mask
+				element_hash = hs[la_pos]
+				if map_hash_is_empty(element_hash) {
+					return
+				}
+
+				probe_distance = map_probe_distance(m^, element_hash, la_pos)
+				if probe_distance == 0 {
+					return
+				}
+				// can be ideal?
+				if probe_distance < look_ahead {
+					pos = (la_pos - probe_distance) & mask
+				}
+				k_src = map_cell_index_dynamic(ks, info.ks, la_pos)
+				v_src = map_cell_index_dynamic(vs, info.vs, la_pos)
+			}
+			return
+		} else if distance < probe_distance - look_ahead {
+			// shift back probed
+			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k_src), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v_src), size_of_v)
+			hs[pos] = element_hash
+			hs[la_pos] = 0
+		} else {
+			// place saved, save probed
+			if result == 0 {
+				result = v_dst
+			}
+			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+			hs[pos] = h
+
+			intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(k_src), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(v_src), size_of_v)
+			h = hs[la_pos]
+			hs[la_pos] = 0
+			distance = probe_distance - look_ahead
+		}
+
+		pos = (pos + 1) & mask
+		distance += 1
+	}
 }
 
 @(require_results)
@@ -696,49 +746,19 @@ map_erase_dynamic :: #force_inline proc "contextless" (#no_alias m: ^Raw_Map, #n
 	m.len -= 1
 	ok = true
 
-	{ // coalesce tombstones
-		// HACK NOTE(bill): This is an ugly bodge but it is coalescing the tombstone slots
-		mask := (uintptr(1)<<map_log2_cap(m^)) - 1
-		curr_index := uintptr(index)
-
-		// TODO(bill): determine a good value for this empirically
-		// if we do not implement backward shift deletion
-		PROBE_COUNT :: 8
-		for _ in 0..<PROBE_COUNT {
-			next_index := (curr_index + 1) & mask
-			if next_index == index {
-				// looped around
-				break
-			}
-
-			// if the next element is empty or has zero probe distance, then any lookup
-			// will always fail on the next, so we can clear both of them
-			hash := hs[next_index]
-			if map_hash_is_empty(hash) || map_probe_distance(m^, hash, next_index) == 0 {
-				hs[curr_index] = 0
-				return
-			}
-
-			// now the next element will have a probe count of at least one,
-			// so it can use the delete slot instead
-			hs[curr_index] = hs[next_index]
-
-			mem_copy_non_overlapping(
-				rawptr(map_cell_index_dynamic(ks, info.ks, curr_index)),
-				rawptr(map_cell_index_dynamic(ks, info.ks, next_index)),
-				int(info.ks.size_of_type),
-			)
-			mem_copy_non_overlapping(
-				rawptr(map_cell_index_dynamic(vs, info.vs, curr_index)),
-				rawptr(map_cell_index_dynamic(vs, info.vs, next_index)),
-				int(info.vs.size_of_type),
-			)
-
-			curr_index = next_index
-		}
+	mask := (uintptr(1)<<map_log2_cap(m^)) - 1
+	curr_index := uintptr(index)
+	next_index := (curr_index + 1) & mask
 
+	// if the next element is empty or has zero probe distance, then any lookup
+	// will always fail on the next, so we can clear both of them
+	hash := hs[next_index]
+	if map_hash_is_empty(hash) || map_probe_distance(m^, hash, next_index) == 0 {
+		hs[curr_index] = 0
+	} else {
 		hs[curr_index] |= TOMBSTONE_MASK
 	}
+
 	return
 }
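
Note: the erase path above now clears a slot outright only when the following slot is empty or already at its ideal probe position (so no lookup would need to probe past it), and otherwise just sets the tombstone bit, leaving compaction to the insertion loop's backward-shift pass. A simplified, hypothetical illustration of that rule, not the runtime's actual data layout:

	package map_erase_sketch

	TOMBSTONE :: u64(1) << 63

	// Distance of `slot` from the hash's ideal slot, assuming a power-of-two
	// capacity and `hash & mask` as the ideal position.
	probe_distance :: proc(hash: u64, slot, mask: int) -> int {
		return (slot - (int(hash) & mask)) & mask
	}

	// Clear the erased slot when any lookup reaching it would stop at the next
	// slot anyway; otherwise mark it deleted. `hashes[i] == 0` means empty.
	erase_slot :: proc(hashes: []u64, index: int) {
		mask := len(hashes) - 1
		next := (index + 1) & mask
		h := hashes[next]
		if h == 0 || probe_distance(h, next, mask) == 0 {
			hashes[index] = 0
		} else {
			hashes[index] |= TOMBSTONE
		}
	}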
 

+ 2 - 0
core/text/i18n/i18n.odin

@@ -71,6 +71,8 @@ Error :: enum {
 	TS_File_Expected_Source,
 	TS_File_Expected_Translation,
 	TS_File_Expected_NumerusForm,
+	Bad_Str,
+	Bad_Id,
 
 }
 

+ 34 - 8
core/text/i18n/qt_linguist.odin

@@ -30,10 +30,26 @@ TS_XML_Options := xml.Options{
 parse_qt_linguist_from_bytes :: proc(data: []byte, options := DEFAULT_PARSE_OPTIONS, pluralizer: proc(int) -> int = nil, allocator := context.allocator) -> (translation: ^Translation, err: Error) {
 	context.allocator = allocator
 
+	get_str :: proc(val: xml.Value) -> (str: string, err: Error) {
+		v, ok := val.(string)
+		if ok {
+			return v, .None
+		}
+		return "", .Bad_Str
+	}
+
+	get_id :: proc(val: xml.Value) -> (str: xml.Element_ID, err: Error) {
+		v, ok := val.(xml.Element_ID)
+		if ok {
+			return v, .None
+		}
+		return 0, .Bad_Id
+	}
+
 	ts, xml_err := xml.parse(data, TS_XML_Options)
 	defer xml.destroy(ts)
 
-	if xml_err != .None || ts.element_count < 1 || ts.elements[0].ident != "TS" || len(ts.elements[0].children) == 0 {
+	if xml_err != .None || ts.element_count < 1 || ts.elements[0].ident != "TS" || len(ts.elements[0].value) == 0 {
 		return nil, .TS_File_Parse_Error
 	}
 
@@ -46,10 +62,12 @@ parse_qt_linguist_from_bytes :: proc(data: []byte, options := DEFAULT_PARSE_OPTI
 
 	section: ^Section
 
-	for child_id in ts.elements[0].children {
+	for value in ts.elements[0].value {
+		child_id := get_id(value) or_return
+
 		// These should be <context>s.
-		child := ts.elements[child_id]
-		if child.ident != "context" {
+
+		if ts.elements[child_id].ident != "context" {
 			return translation, .TS_File_Expected_Context
 		}
 
@@ -61,7 +79,8 @@ parse_qt_linguist_from_bytes :: proc(data: []byte, options := DEFAULT_PARSE_OPTI
 
 		section_name, _ := strings.intern_get(&translation.intern, "")
 		if !options.merge_sections {
-			section_name, _ = strings.intern_get(&translation.intern, ts.elements[section_name_id].value)
+			value_text := get_str(ts.elements[section_name_id].value[0]) or_return
+			section_name, _ = strings.intern_get(&translation.intern, value_text)
 		}
 
 		if section_name not_in translation.k_v {
@@ -92,8 +111,14 @@ parse_qt_linguist_from_bytes :: proc(data: []byte, options := DEFAULT_PARSE_OPTI
 				return translation, .TS_File_Expected_Translation
 			}
 
-			source, _ := strings.intern_get(&translation.intern, ts.elements[source_id].value)
-			xlat,   _ := strings.intern_get(&translation.intern, ts.elements[translation_id].value)
+			source    := get_str(ts.elements[source_id].value[0]) or_return
+			source, _  = strings.intern_get(&translation.intern, source)
+
+			xlat := ""
+			if !has_plurals {
+				xlat    = get_str(ts.elements[translation_id].value[0]) or_return
+				xlat, _ = strings.intern_get(&translation.intern, xlat)
+			}
 
 			if source in section {
 				return translation, .Duplicate_Key
@@ -124,7 +149,8 @@ parse_qt_linguist_from_bytes :: proc(data: []byte, options := DEFAULT_PARSE_OPTI
 					if !numerus_found {
 						break
 					}
-					numerus, _ := strings.intern_get(&translation.intern, ts.elements[numerus_id].value)
+					numerus := get_str(ts.elements[numerus_id].value[0]) or_return
+					numerus, _ = strings.intern_get(&translation.intern, numerus)
 					section[source][num_plurals] = numerus
 
 					num_plurals += 1
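The 'get_str' and 'get_id' helpers above surface the new '.Bad_Str' and '.Bad_Id' errors when an XML value is not of the expected kind. A minimal usage sketch; 'example.ts' is a placeholder for a real Qt Linguist file:

package main

import "core:fmt"
import "core:os"
import "core:text/i18n"

main :: proc() {
	data, ok := os.read_entire_file("example.ts")
	if !ok {
		fmt.eprintln("could not read example.ts")
		return
	}
	defer delete(data)

	translation, err := i18n.parse_qt_linguist_from_bytes(data)
	if err != .None {
		// .Bad_Str / .Bad_Id are reported when an element value has an unexpected type.
		fmt.eprintln("parse error:", err)
		return
	}
	fmt.println("sections parsed:", len(translation.k_v))
}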

+ 2 - 2
core/thread/thread_windows.odin

@@ -129,8 +129,8 @@ _destroy :: proc(thread: ^Thread) {
 	free(thread, thread.creation_allocator)
 }
 
-_terminate :: proc(using thread : ^Thread, exit_code: int) {
-	win32.TerminateThread(win32_thread, u32(exit_code))
+_terminate :: proc(thread: ^Thread, exit_code: int) {
+	win32.TerminateThread(thread.win32_thread, u32(exit_code))
 }
 
 _yield :: proc() {

+ 19 - 17
core/time/time.odin

@@ -59,28 +59,30 @@ sleep :: proc "contextless" (d: Duration) {
 	_sleep(d)
 }
 
-stopwatch_start :: proc "contextless" (using stopwatch: ^Stopwatch) {
-	if !running {
-		_start_time = tick_now()
-		running = true
+stopwatch_start :: proc "contextless" (stopwatch: ^Stopwatch) {
+	if !stopwatch.running {
+		stopwatch._start_time = tick_now()
+		stopwatch.running = true
 	}
 }
 
-stopwatch_stop :: proc "contextless" (using stopwatch: ^Stopwatch) {
-	if running {
-		_accumulation += tick_diff(_start_time, tick_now())
-		running = false
+stopwatch_stop :: proc "contextless" (stopwatch: ^Stopwatch) {
+	if stopwatch.running {
+		stopwatch._accumulation += tick_diff(stopwatch._start_time, tick_now())
+		stopwatch.running = false
 	}
 }
 
-stopwatch_reset :: proc "contextless" (using stopwatch: ^Stopwatch) {
-	_accumulation = {}
-	running = false
+stopwatch_reset :: proc "contextless" (stopwatch: ^Stopwatch) {
+	stopwatch._accumulation = {}
+	stopwatch.running = false
 }
 
-stopwatch_duration :: proc "contextless" (using stopwatch: Stopwatch) -> Duration {
-	if !running { return _accumulation }
-	return _accumulation + tick_diff(_start_time, tick_now())
+stopwatch_duration :: proc "contextless" (stopwatch: Stopwatch) -> Duration {
+	if !stopwatch.running {
+		return stopwatch._accumulation
+	}
+	return stopwatch._accumulation + tick_diff(stopwatch._start_time, tick_now())
 }
 
 diff :: proc "contextless" (start, end: Time) -> Duration {
@@ -171,9 +173,9 @@ day :: proc "contextless" (t: Time) -> (day: int) {
 }
 
 weekday :: proc "contextless" (t: Time) -> (weekday: Weekday) {
-    abs := _time_abs(t)
-    sec := (abs + u64(Weekday.Monday) * SECONDS_PER_DAY) % SECONDS_PER_WEEK
-    return Weekday(int(sec) / SECONDS_PER_DAY)
+	abs := _time_abs(t)
+	sec := (abs + u64(Weekday.Monday) * SECONDS_PER_DAY) % SECONDS_PER_WEEK
+	return Weekday(int(sec) / SECONDS_PER_DAY)
 }
 
 clock :: proc { clock_from_time, clock_from_duration, clock_from_stopwatch }
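The stopwatch procedures above only drop 'using' from their parameters; the public API is unchanged. A small usage sketch:

package main

import "core:fmt"
import "core:time"

main :: proc() {
	sw: time.Stopwatch
	time.stopwatch_start(&sw)
	time.sleep(10 * time.Millisecond) // stand-in for real work
	time.stopwatch_stop(&sw)
	fmt.println("elapsed:", time.stopwatch_duration(sw))
}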

+ 1 - 0
examples/demo/demo.odin

@@ -1,3 +1,4 @@
+//+vet !using-stmt !using-param
 package main
 
 import "core:fmt"

+ 39 - 3
src/build_settings.cpp

@@ -216,6 +216,43 @@ enum BuildPath : u8 {
 	BuildPathCOUNT,
 };
 
+enum VetFlags : u64 {
+	VetFlag_NONE       = 0,
+	VetFlag_Unused     = 1u<<0, // 1
+	VetFlag_Shadowing  = 1u<<1, // 2
+	VetFlag_UsingStmt  = 1u<<2, // 4
+	VetFlag_UsingParam = 1u<<3, // 8
+	VetFlag_Style      = 1u<<4, // 16
+	VetFlag_Semicolon  = 1u<<5, // 32
+
+	VetFlag_Extra     = 1u<<16,
+
+	VetFlag_All = VetFlag_Unused|VetFlag_Shadowing|VetFlag_UsingStmt, // the standard set enabled by a plain -vet; excludes the opt-in checks
+
+	VetFlag_Using = VetFlag_UsingStmt|VetFlag_UsingParam,
+};
+
+u64 get_vet_flag_from_name(String const &name) {
+	if (name == "unused") {
+		return VetFlag_Unused;
+	} else if (name == "shadowing") {
+		return VetFlag_Shadowing;
+	} else if (name == "using-stmt") {
+		return VetFlag_UsingStmt;
+	} else if (name == "using-param") {
+		return VetFlag_UsingParam;
+	} else if (name == "style") {
+		return VetFlag_Style;
+	} else if (name == "semicolon") {
+		return VetFlag_Semicolon;
+	} else if (name == "extra") {
+		return VetFlag_Extra;
+	}
+	return VetFlag_NONE;
+}
+
+
+
 // This stores the information for the specify architecture of this build
 struct BuildContext {
 	// Constants
@@ -255,6 +292,8 @@ struct BuildContext {
 	String resource_filepath;
 	String pdb_filepath;
 
+	u64 vet_flags;
+
 	bool   has_resource;
 	String link_flags;
 	String extra_linker_flags;
@@ -280,15 +319,12 @@ struct BuildContext {
 	bool   no_entry_point;
 	bool   no_thread_local;
 	bool   use_lld;
-	bool   vet;
-	bool   vet_extra;
 	bool   cross_compiling;
 	bool   different_os;
 	bool   keep_object_files;
 	bool   disallow_do;
 
 	bool   strict_style;
-	bool   strict_style_init_only;
 
 	bool   ignore_warnings;
 	bool   warnings_as_errors;

+ 1 - 1
src/check_builtin.cpp

@@ -1406,7 +1406,7 @@ gb_internal bool check_builtin_procedure_directive(CheckerContext *c, Operand *o
 		}
 		return false;
 	} else if (name == "load_or") {
-		warning(call, "'#load_or' is deprecated in favour of '#load(path) or_else default'");
+		error(call, "'#load_or' has now been removed in favour of '#load(path) or_else default'");
 
 		if (ce->args.count != 2) {
 			if (ce->args.count == 0) {
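With '#load_or' now a hard error, the message above spells out the replacement. A sketch of the new form; 'config.txt' is a placeholder path and the empty byte slice is an assumed fallback value:

package main

import "core:fmt"

main :: proc() {
	// previously: data :: #load_or("config.txt", []u8{})
	data := #load("config.txt") or_else []u8{}
	fmt.println("loaded", len(data), "bytes")
}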

+ 10 - 33
src/check_decl.cpp

@@ -7,13 +7,15 @@ gb_internal Type *check_init_variable(CheckerContext *ctx, Entity *e, Operand *o
 		e->type == t_invalid) {
 
 		if (operand->mode == Addressing_Builtin) {
+			ERROR_BLOCK();
 			gbString expr_str = expr_to_string(operand->expr);
 
-			// TODO(bill): is this a good enough error message?
 			error(operand->expr,
-				  "Cannot assign built-in procedure '%s' in %.*s",
-				  expr_str,
-				  LIT(context_name));
+			      "Cannot assign built-in procedure '%s' in %.*s",
+			      expr_str,
+			      LIT(context_name));
+
+			error_line("\tBuilt-in procedures are implemented by the compiler and might not be actual, instantiated procedures\n");
 
 			operand->mode = Addressing_Invalid;
 
@@ -159,9 +161,8 @@ gb_internal void check_init_constant(CheckerContext *ctx, Entity *e, Operand *op
 	}
 
 	if (operand->mode != Addressing_Constant) {
-		// TODO(bill): better error
 		gbString str = expr_to_string(operand->expr);
-		error(operand->expr, "'%s' is not a constant", str);
+		error(operand->expr, "'%s' is not a compile-time known constant", str);
 		gb_string_free(str);
 		if (e->type == nullptr) {
 			e->type = t_invalid;
@@ -354,31 +355,7 @@ gb_internal void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr,
 
 	// using decl
 	if (decl->is_using) {
-		warning(init_expr, "'using' an enum declaration is not allowed, prefer using implicit selector expressions e.g. '.A'");
-		#if 1
-		// NOTE(bill): Must be an enum declaration
-		if (te->kind == Ast_EnumType) {
-			Scope *parent = e->scope;
-			if (parent->flags&ScopeFlag_File) {
-				// NOTE(bill): Use package scope
-				parent = parent->parent;
-			}
-
-			Type *t = base_type(e->type);
-			if (t->kind == Type_Enum) {
-				for (Entity *f : t->Enum.fields) {
-					if (f->kind != Entity_Constant) {
-						continue;
-					}
-					String name = f->token.string;
-					if (is_blank_ident(name)) {
-						continue;
-					}
-					add_entity(ctx, parent, nullptr, f);
-				}
-			}
-		}
-		#endif
+		error(init_expr, "'using' an enum declaration is not allowed, prefer using implicit selector expressions e.g. '.A'");
 	}
 }
 
@@ -1064,7 +1041,7 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
 		auto *fp = &ctx->info->foreigns;
 		StringHashKey key = string_hash_string(name);
 		Entity **found = string_map_get(fp, key);
-		if (found) {
+		if (found && e != *found) {
 			Entity *f = *found;
 			TokenPos pos = f->token.pos;
 			Type *this_type = base_type(e->type);
@@ -1636,7 +1613,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
 	}
 	check_close_scope(ctx);
 
-	check_scope_usage(ctx->checker, ctx->scope);
+	check_scope_usage(ctx->checker, ctx->scope, check_vet_flags(body));
 
 	add_deps_from_child_to_parent(decl);
 

+ 14 - 34
src/check_expr.cpp

@@ -349,6 +349,10 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
 		return false;
 	}
 
+	if (base_entity->flags & EntityFlag_Disabled) {
+		return false;
+	}
+
 	String name = base_entity->token.string;
 
 	Type *src = base_type(base_entity->type);
@@ -462,7 +466,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
 
 
 	{
-		// LEAK TODO(bill): This is technically a memory leak as it has to generate the type twice
+		// LEAK NOTE(bill): This is technically a memory leak as it has to generate the type twice
 		bool prev_no_polymorphic_errors = nctx.no_polymorphic_errors;
 		defer (nctx.no_polymorphic_errors = prev_no_polymorphic_errors);
 		nctx.no_polymorphic_errors = false;
@@ -470,7 +474,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
 		// NOTE(bill): Reset scope from the failed procedure type
 		scope_reset(scope);
 
-		// LEAK TODO(bill): Cloning this AST may be leaky
+		// LEAK NOTE(bill): Cloning this AST may be leaky but this is not really an issue due to arena-based allocation
 		Ast *cloned_proc_type_node = clone_ast(pt->node);
 		success = check_procedure_type(&nctx, final_proc_type, cloned_proc_type_node, &operands);
 		if (!success) {
@@ -778,16 +782,6 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
 		}
 	}
 
-	// ^T <- rawptr
-#if 0
-	// TODO(bill): Should C-style (not C++) pointer cast be allowed?
-	if (is_type_pointer(dst) && is_type_rawptr(src)) {
-	    return true;
-	}
-#endif
-#if 1
-
-
 	// rawptr <- ^T
 	if (are_types_identical(type, t_rawptr) && is_type_pointer(src)) {
 		return 5;
@@ -808,7 +802,6 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
 			return 4;
 		}
 	}
-#endif
 
 	if (is_type_polymorphic(dst) && !is_type_polymorphic(src)) {
 		bool modify_type = !c->no_polymorphic_errors;
@@ -824,7 +817,6 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
 			}
 		}
 
-		// TODO(bill): Determine which rule is a better on in practice
 		if (dst->Union.variants.count == 1) {
 			Type *vt = dst->Union.variants[0];
 			i64 score = check_distance_between_types(c, operand, vt);
@@ -1093,7 +1085,7 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
 
 			// TODO(bill): is this a good enough error message?
 			error(operand->expr,
-			      "Cannot assign overloaded procedure '%s' to '%s' in %.*s",
+			      "Cannot assign overloaded procedure group '%s' to '%s' in %.*s",
 			      expr_str,
 			      op_type_str,
 			      LIT(context_name));
@@ -1120,7 +1112,6 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
 
 		switch (operand->mode) {
 		case Addressing_Builtin:
-			// TODO(bill): Actually allow built in procedures to be passed around and thus be created on use
 			error(operand->expr,
 			      "Cannot assign built-in procedure '%s' in %.*s",
 			      expr_str,
@@ -1412,9 +1403,6 @@ gb_internal bool is_polymorphic_type_assignable(CheckerContext *c, Type *poly, T
 		return false;
 	case Type_Proc:
 		if (source->kind == Type_Proc) {
-			// return check_is_assignable_to(c, &o, poly);
-			// TODO(bill): Polymorphic type assignment
-			#if 1
 			TypeProc *x = &poly->Proc;
 			TypeProc *y = &source->Proc;
 			if (x->calling_convention != y->calling_convention) {
@@ -1447,7 +1435,6 @@ gb_internal bool is_polymorphic_type_assignable(CheckerContext *c, Type *poly, T
 			}
 
 			return true;
-			#endif
 		}
 		return false;
 	case Type_Map:
@@ -1699,7 +1686,6 @@ gb_internal bool check_unary_op(CheckerContext *c, Operand *o, Token op) {
 		gb_string_free(str);
 		return false;
 	}
-	// TODO(bill): Handle errors correctly
 	Type *type = base_type(core_array_type(o->type));
 	gbString str = nullptr;
 	switch (op.kind) {
@@ -1743,7 +1729,6 @@ gb_internal bool check_unary_op(CheckerContext *c, Operand *o, Token op) {
 gb_internal bool check_binary_op(CheckerContext *c, Operand *o, Token op) {
 	Type *main_type = o->type;
 
-	// TODO(bill): Handle errors correctly
 	Type *type = base_type(core_array_type(main_type));
 	Type *ct = core_type(type);
 
@@ -2261,7 +2246,7 @@ gb_internal bool check_is_not_addressable(CheckerContext *c, Operand *o) {
 }
 
 gb_internal void check_old_for_or_switch_value_usage(Ast *expr) {
-	if (!build_context.strict_style) {
+	if (!(build_context.strict_style || (check_vet_flags(expr) & VetFlag_Style))) {
 		return;
 	}
 
@@ -2351,7 +2336,7 @@ gb_internal void check_unary_expr(CheckerContext *c, Operand *o, Token op, Ast *
 				o->type = alloc_type_pointer(o->type);
 			}
 		} else {
-			if (build_context.strict_style && ast_node_expect(node, Ast_UnaryExpr)) {
+			if (ast_node_expect(node, Ast_UnaryExpr)) {
 				ast_node(ue, UnaryExpr, node);
 				check_old_for_or_switch_value_usage(ue->expr);
 			}
@@ -2775,8 +2760,6 @@ gb_internal void check_shift(CheckerContext *c, Operand *x, Operand *y, Ast *nod
 		gb_string_free(err_str);
 	}
 
-	// TODO(bill): Should we support shifts for fixed arrays and #simd vectors?
-
 	if (!is_type_integer(x->type)) {
 		gbString err_str = expr_to_string(x->expr);
 		error(node, "Shift operand '%s' must be an integer", err_str);
@@ -3099,7 +3082,7 @@ gb_internal void check_cast(CheckerContext *c, Operand *x, Type *type) {
 		update_untyped_expr_type(c, x->expr, final_type, true);
 	}
 
-	if (build_context.vet_extra) {
+	if (check_vet_flags(x->expr) & VetFlag_Extra) {
 		if (are_types_identical(x->type, type)) {
 			gbString str = type_to_string(type);
 			warning(x->expr, "Unneeded cast to the same type '%s'", str);
@@ -3171,7 +3154,7 @@ gb_internal bool check_transmute(CheckerContext *c, Ast *node, Operand *o, Type
 		return false;
 	}
 
-	if (build_context.vet_extra) {
+	if (check_vet_flags(node) & VetFlag_Extra) {
 		if (are_types_identical(o->type, dst_t)) {
 			gbString str = type_to_string(dst_t);
 			warning(o->expr, "Unneeded transmute to the same type '%s'", str);
@@ -4437,7 +4420,6 @@ gb_internal ExactValue get_constant_field_single(CheckerContext *c, ExactValue v
 	case_end;
 
 	default:
-		// TODO(bill): Should this be a general fallback?
 		if (success_) *success_ = true;
 		if (finish_) *finish_ = true;
 		return empty_exact_value;
@@ -4793,8 +4775,6 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
 	}
 
 	if (entity == nullptr && selector->kind == Ast_Ident && is_type_array(type_deref(operand->type))) {
-		// TODO(bill): Simd_Vector swizzling
-
 		String field_name = selector->Ident.token.string;
 		if (1 < field_name.len && field_name.len <= 4) {
 			u8 swizzles_xyzw[4] = {'x', 'y', 'z', 'w'};
@@ -5989,8 +5969,8 @@ gb_internal bool check_call_arguments_single(CheckerContext *c, Ast *call, Opera
 	}
 
 	Entity *entity_to_use = data->gen_entity != nullptr ? data->gen_entity : e;
-	add_entity_use(c, ident, entity_to_use);
 	if (!return_on_failure && entity_to_use != nullptr) {
+		add_entity_use(c, ident, entity_to_use);
 		update_untyped_expr_type(c, operand->expr, entity_to_use->type, true);
 		add_type_and_value(c, operand->expr, operand->mode, entity_to_use->type, operand->value);
 	}
@@ -7157,7 +7137,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
 		i32 id = operand->builtin_id;
 		Entity *e = entity_of_node(operand->expr);
 		if (e != nullptr && e->token.string == "expand_to_tuple") {
-			warning(operand->expr, "'expand_to_tuple' has been replaced with 'expand_values'");
+			error(operand->expr, "'expand_to_tuple' has been replaced with 'expand_values'");
 		}
 		if (!check_builtin_procedure(c, operand, call, id, type_hint)) {
 			operand->mode = Addressing_Invalid;
@@ -10033,7 +10013,7 @@ gb_internal ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast
 			Type *type = type_of_expr(ac->expr);
 			check_cast(c, o, type_hint);
 			if (is_type_typed(type) && are_types_identical(type, type_hint)) {
-				if (build_context.vet_extra) {
+				if (check_vet_flags(node) & VetFlag_Extra) {
 					error(node, "Redundant 'auto_cast' applied to expression");
 				}
 			}
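'expand_to_tuple' is now an error rather than a warning; the replacement builtin is 'expand_values'. A short sketch:

package main

import "core:fmt"

Pair :: struct {
	x, y: int,
}

main :: proc() {
	p := Pair{3, 4}
	x, y := expand_values(p) // previously: expand_to_tuple(p)
	fmt.println(x, y)
}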

+ 11 - 6
src/check_stmt.cpp

@@ -384,7 +384,6 @@ gb_internal Type *check_assignment_variable(CheckerContext *ctx, Operand *lhs, O
 		}
 
 		if (e != nullptr) {
-			// HACK TODO(bill): Should the entities be freed as it's technically a leak
 			rhs->mode = Addressing_Value;
 			rhs->type = e->type;
 			rhs->proc_group = nullptr;
@@ -394,7 +393,7 @@ gb_internal Type *check_assignment_variable(CheckerContext *ctx, Operand *lhs, O
 			ast_node(i, Ident, node);
 			e = scope_lookup(ctx->scope, i->token.string);
 			if (e != nullptr && e->kind == Entity_Variable) {
-				used = (e->flags & EntityFlag_Used) != 0; // TODO(bill): Make backup just in case
+				used = (e->flags & EntityFlag_Used) != 0; // NOTE(bill): Make backup just in case
 			}
 		}
 
@@ -888,7 +887,7 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
 	check_open_scope(ctx, node);
 	defer (check_close_scope(ctx));
 
-	check_label(ctx, ss->label, node); // TODO(bill): What should the label's "scope" be?
+	check_label(ctx, ss->label, node);
 
 	if (ss->init != nullptr) {
 		check_stmt(ctx, ss->init, 0);
@@ -1125,7 +1124,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
 	check_open_scope(ctx, node);
 	defer (check_close_scope(ctx));
 
-	check_label(ctx, ss->label, node); // TODO(bill): What should the label's "scope" be?
+	check_label(ctx, ss->label, node);
 
 	if (ss->tag->kind != Ast_AssignStmt) {
 		error(ss->tag, "Expected an 'in' assignment for this type switch statement");
@@ -1960,7 +1959,7 @@ gb_internal void check_value_decl_stmt(CheckerContext *ctx, Ast *node, u32 mod_f
 		Token token = ast_token(node);
 		if (vd->type != nullptr && entity_count > 1) {
 			error(token, "'using' can only be applied to one variable of the same type");
-			// TODO(bill): Should a 'continue' happen here?
+			// NOTE(bill): `using` will only be applied to a single declaration
 		}
 
 		for (isize entity_index = 0; entity_index < 1; entity_index++) {
@@ -2294,7 +2293,7 @@ gb_internal void check_for_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
 	mod_flags |= Stmt_BreakAllowed | Stmt_ContinueAllowed;
 
 	check_open_scope(ctx, node);
-	check_label(ctx, fs->label, node); // TODO(bill): What should the label's "scope" be?
+	check_label(ctx, fs->label, node);
 
 	if (fs->init != nullptr) {
 		check_stmt(ctx, fs->init, 0);
@@ -2464,6 +2463,12 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
 			error(us->token, "Empty 'using' list");
 			return;
 		}
+		if (check_vet_flags(node) & VetFlag_UsingStmt) {
+			ERROR_BLOCK();
+			error(node, "'using' as a statement is not allowed when '-vet' or '-vet-using-stmt' is applied");
+			error_line("\t'using' as a statement is considered bad practice outside of immediate refactoring\n");
+		}
+
 		for (Ast *expr : us->list) {
 			expr = unparen_expr(expr);
 			Entity *e = nullptr;

+ 6 - 0
src/check_type.cpp

@@ -1474,6 +1474,12 @@ gb_internal Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_para
 		Type *specialization = nullptr;
 
 		bool is_using = (p->flags&FieldFlag_using) != 0;
+		if ((check_vet_flags(param) & VetFlag_UsingParam) && is_using) {
+			ERROR_BLOCK();
+			error(param, "'using' on a procedure parameter is not allowed when '-vet' or '-vet-using-param' is applied");
+			error_line("\t'using' on a procedure parameter is considered bad practice outside of immediate refactoring\n");
+
+		}
 
 		if (type_expr == nullptr) {
 			param_value = handle_parameter_value(ctx, nullptr, &type, default_value, true);
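Together with the statement check above, this turns both forms of 'using' into errors under '-vet', '-vet-using-stmt', or '-vet-using-param'. A sketch of code that would be flagged, next to the explicit form that passes (type and procedure names are made up):

package main

import "core:fmt"

Entity :: struct {
	name: string,
	hp:   int,
}

// flagged by -vet-using-param:
//   hurt :: proc(using e: ^Entity, amount: int) { hp -= amount }
hurt :: proc(e: ^Entity, amount: int) {
	// flagged by -vet-using-stmt:
	//   using e
	e.hp -= amount
	fmt.println(e.name, e.hp)
}

main :: proc() {
	e := Entity{"slime", 10}
	hurt(&e, 3)
}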

+ 63 - 36
src/checker.cpp

@@ -521,6 +521,28 @@ GB_COMPARE_PROC(entity_variable_pos_cmp) {
 }
 
 
+
+gb_internal u64 check_vet_flags(CheckerContext *c) {
+	AstFile *file = c->file;
+	if (file == nullptr &&
+	    c->curr_proc_decl &&
+	    c->curr_proc_decl->proc_lit) {
+		file = c->curr_proc_decl->proc_lit->file();
+	}
+	if (file && file->vet_flags_set) {
+		return file->vet_flags;
+	}
+	return build_context.vet_flags;
+}
+
+gb_internal u64 check_vet_flags(Ast *node) {
+	AstFile *file = node->file();
+	if (file && file->vet_flags_set) {
+		return file->vet_flags;
+	}
+	return build_context.vet_flags;
+}
+
 enum VettedEntityKind {
 	VettedEntity_Invalid,
 
@@ -655,9 +677,9 @@ gb_internal bool check_vet_unused(Checker *c, Entity *e, VettedEntity *ve) {
 	return false;
 }
 
-gb_internal void check_scope_usage(Checker *c, Scope *scope) {
-	bool vet_unused = true;
-	bool vet_shadowing = true;
+gb_internal void check_scope_usage(Checker *c, Scope *scope, u64 vet_flags) {
+	bool vet_unused = (vet_flags & VetFlag_Unused) != 0;
+	bool vet_shadowing = (vet_flags & (VetFlag_Shadowing|VetFlag_Using)) != 0;
 
 	Array<VettedEntity> vetted_entities = {};
 	array_init(&vetted_entities, heap_allocator());
@@ -691,15 +713,17 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) {
 
 		if (ve.kind == VettedEntity_Shadowed_And_Unused) {
 			error(e->token, "'%.*s' declared but not used, possibly shadows declaration at line %d", LIT(name), other->token.pos.line);
-		} else if (build_context.vet) {
+		} else if (vet_flags) {
 			switch (ve.kind) {
 			case VettedEntity_Unused:
-				error(e->token, "'%.*s' declared but not used", LIT(name));
+				if (vet_flags & VetFlag_Unused) {
+					error(e->token, "'%.*s' declared but not used", LIT(name));
+				}
 				break;
 			case VettedEntity_Shadowed:
-				if (e->flags&EntityFlag_Using) {
+				if ((vet_flags & (VetFlag_Shadowing|VetFlag_Using)) != 0 && e->flags&EntityFlag_Using) {
 					error(e->token, "Declaration of '%.*s' from 'using' shadows declaration at line %d", LIT(name), other->token.pos.line);
-				} else {
+				} else if ((vet_flags & (VetFlag_Shadowing)) != 0) {
 					error(e->token, "Declaration of '%.*s' shadows declaration at line %d", LIT(name), other->token.pos.line);
 				}
 				break;
@@ -726,7 +750,7 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) {
 		if (child->flags & (ScopeFlag_Proc|ScopeFlag_Type|ScopeFlag_File)) {
 			// Ignore these
 		} else {
-			check_scope_usage(c, child);
+			check_scope_usage(c, child, vet_flags);
 		}
 	}
 }
@@ -943,7 +967,6 @@ gb_internal void init_universal(void) {
 	add_global_bool_constant("true",  true);
 	add_global_bool_constant("false", false);
 
-	// TODO(bill): Set through flags in the compiler
 	add_global_string_constant("ODIN_VENDOR",  bc->ODIN_VENDOR);
 	add_global_string_constant("ODIN_VERSION", bc->ODIN_VERSION);
 	add_global_string_constant("ODIN_ROOT",    bc->ODIN_ROOT);
@@ -1455,7 +1478,6 @@ gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMo
 	if (ctx->decl) {
 		mutex = &ctx->decl->type_and_value_mutex;
 	} else if (ctx->pkg) {
-		// TODO(bill): is a per package mutex is a good idea here?
 		mutex = &ctx->pkg->type_and_value_mutex;
 	}
 
@@ -1583,30 +1605,28 @@ gb_internal void add_entity_use(CheckerContext *c, Ast *identifier, Entity *enti
 	if (entity == nullptr) {
 		return;
 	}
-	if (identifier != nullptr) {
-		if (identifier->kind != Ast_Ident) {
-			return;
-		}
-		Ast *empty_ident = nullptr;
-		entity->identifier.compare_exchange_strong(empty_ident, identifier);
-
-		identifier->Ident.entity = entity;
-
-		String dmsg = entity->deprecated_message;
-		if (dmsg.len > 0) {
-			warning(identifier, "%.*s is deprecated: %.*s", LIT(entity->token.string), LIT(dmsg));
-		}
-		String wmsg = entity->warning_message;
-		if (wmsg.len > 0) {
-			warning(identifier, "%.*s: %.*s", LIT(entity->token.string), LIT(wmsg));
-		}
-	}
-	entity->flags |= EntityFlag_Used;
 	add_declaration_dependency(c, entity);
+	entity->flags |= EntityFlag_Used;
 	if (entity_has_deferred_procedure(entity)) {
 		Entity *deferred = entity->Procedure.deferred_procedure.entity;
 		add_entity_use(c, nullptr, deferred);
 	}
+	if (identifier == nullptr || identifier->kind != Ast_Ident) {
+		return;
+	}
+	Ast *empty_ident = nullptr;
+	entity->identifier.compare_exchange_strong(empty_ident, identifier);
+
+	identifier->Ident.entity = entity;
+
+	String dmsg = entity->deprecated_message;
+	if (dmsg.len > 0) {
+		warning(identifier, "%.*s is deprecated: %.*s", LIT(entity->token.string), LIT(dmsg));
+	}
+	String wmsg = entity->warning_message;
+	if (wmsg.len > 0) {
+		warning(identifier, "%.*s: %.*s", LIT(entity->token.string), LIT(wmsg));
+	}
 }
 
 
@@ -2560,9 +2580,6 @@ gb_internal Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInf
 		}
 	}
 
-	// TODO(bill): This could be multithreaded to improve performance
-	// This means that the entity graph node set will have to be thread safe
-
 	TIME_SECTION("generate_entity_dependency_graph: Calculate edges for graph M - Part 2");
 	auto G = array_make<EntityGraphNode *>(allocator, 0, M.count);
 
@@ -2982,6 +2999,12 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_group_attribute) {
 			}
 		}
 		return true;
+	} else if (name == "require_results") {
+		if (value != nullptr) {
+			error(elem, "Expected no value for '%.*s'", LIT(name));
+		}
+		ac->require_results = true;
+		return true;
 	}
 	return false;
 }
@@ -3059,7 +3082,7 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) {
 			check_expr(c, &o, value);
 			Entity *e = entity_of_node(o.expr);
 			if (e != nullptr && e->kind == Entity_Procedure) {
-				warning(elem, "'%.*s' is deprecated, please use one of the following instead: 'deferred_none', 'deferred_in', 'deferred_out'", LIT(name));
+				error(elem, "'%.*s' is not allowed any more, please use one of the following instead: 'deferred_none', 'deferred_in', 'deferred_out'", LIT(name));
 				if (ac->deferred_procedure.entity != nullptr) {
 					error(elem, "Previous usage of a 'deferred_*' attribute");
 				}
@@ -4558,7 +4581,7 @@ gb_internal DECL_ATTRIBUTE_PROC(foreign_import_decl_attribute) {
 		if (value != nullptr) {
 			error(elem, "Expected no parameter for '%.*s'", LIT(name));
 		} else if (name == "force") {
-			warning(elem, "'force' is deprecated and is identical to 'require'");
+			error(elem, "'force' was replaced with 'require'");
 		}
 		ac->require_declaration = true;
 		return true;
@@ -5956,7 +5979,11 @@ gb_internal void check_parsed_files(Checker *c) {
 	TIME_SECTION("check scope usage");
 	for (auto const &entry : c->info.files) {
 		AstFile *f = entry.value;
-		check_scope_usage(c, f->scope);
+		u64 vet_flags = build_context.vet_flags;
+		if (f->vet_flags_set) {
+			vet_flags = f->vet_flags;
+		}
+		check_scope_usage(c, f->scope, vet_flags);
 	}
 
 	TIME_SECTION("add basic type information");
@@ -6074,7 +6101,7 @@ gb_internal void check_parsed_files(Checker *c) {
 		while (mpsc_dequeue(&c->info.intrinsics_entry_point_usage, &node)) {
 			if (c->info.entry_point == nullptr && node != nullptr) {
 				if (node->file()->pkg->kind != Package_Runtime) {
-					warning(node, "usage of intrinsics.__entry_point will be a no-op");
+					error(node, "usage of intrinsics.__entry_point will be a no-op");
 				}
 			}
 		}
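Among the checker changes above, 'proc_group_attribute' now recognises 'require_results', so the attribute can sit on a whole procedure group instead of on each member. A sketch, assuming the usual attribute syntax:

package main

import "core:fmt"

add_int :: proc(a, b: int) -> int { return a + b }
add_f32 :: proc(a, b: f32) -> f32 { return a + b }

@(require_results)
add :: proc{add_int, add_f32}

main :: proc() {
	a, b := 3, 4
	fmt.println(add(a, b))               // resolves to add_int
	fmt.println(add(f32(1.5), f32(2.5))) // resolves to add_f32
}

Discarding the result of either overload, e.g. calling 'add(a, b)' as a bare statement, is then reported by the checker.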

+ 3 - 2
src/checker.hpp

@@ -387,8 +387,6 @@ struct CheckerInfo {
 	BlockingMutex foreign_mutex; // NOT recursive
 	StringMap<Entity *> foreigns;
 
-	// NOTE(bill): These are actually MPSC queues
-	// TODO(bill): Convert them to be MPSC queues
 	MPSCQueue<Entity *> definition_queue;
 	MPSCQueue<Entity *> entity_queue;
 	MPSCQueue<Entity *> required_global_variable_queue;
@@ -449,6 +447,9 @@ struct CheckerContext {
 	Ast *assignment_lhs_hint;
 };
 
+gb_internal u64 check_vet_flags(CheckerContext *c);
+gb_internal u64 check_vet_flags(Ast *node);
+
 
 struct Checker {
 	Parser *    parser;

+ 3 - 4
src/entity.cpp

@@ -291,7 +291,6 @@ gb_internal bool is_entity_kind_exported(EntityKind kind, bool allow_builtin = f
 }
 
 gb_internal bool is_entity_exported(Entity *e, bool allow_builtin = false) {
-	// TODO(bill): Determine the actual exportation rules for imports of entities
 	GB_ASSERT(e != nullptr);
 	if (!is_entity_kind_exported(e->kind, allow_builtin)) {
 		return false;
@@ -405,7 +404,7 @@ gb_internal Entity *alloc_entity_array_elem(Scope *scope, Token token, Type *typ
 	return entity;
 }
 
-gb_internal Entity *alloc_entity_procedure(Scope *scope, Token token, Type *signature_type, u64 tags) {
+gb_internal Entity *alloc_entity_procedure(Scope *scope, Token token, Type *signature_type, u64 tags=0) {
 	Entity *entity = alloc_entity(Entity_Procedure, scope, token, signature_type);
 	entity->Procedure.tags = tags;
 	return entity;
@@ -422,7 +421,7 @@ gb_internal Entity *alloc_entity_import_name(Scope *scope, Token token, Type *ty
 	entity->ImportName.path = path;
 	entity->ImportName.name = name;
 	entity->ImportName.scope = import_scope;
-	entity->state = EntityState_Resolved; // TODO(bill): Is this correct?
+	entity->state = EntityState_Resolved;
 	return entity;
 }
 
@@ -431,7 +430,7 @@ gb_internal Entity *alloc_entity_library_name(Scope *scope, Token token, Type *t
 	Entity *entity = alloc_entity(Entity_LibraryName, scope, token, type);
 	entity->LibraryName.paths = paths;
 	entity->LibraryName.name = name;
-	entity->state = EntityState_Resolved; // TODO(bill): Is this correct?
+	entity->state = EntityState_Resolved;
 	return entity;
 }
 

+ 2 - 5
src/exact_value.cpp

@@ -26,8 +26,8 @@ enum ExactValueKind {
 	ExactValue_Complex    = 5,
 	ExactValue_Quaternion = 6,
 	ExactValue_Pointer    = 7,
-	ExactValue_Compound   = 8,  // TODO(bill): Is this good enough?
-	ExactValue_Procedure  = 9, // TODO(bill): Is this good enough?
+	ExactValue_Compound   = 8,
+	ExactValue_Procedure  = 9,
 	ExactValue_Typeid     = 10,
 
 	ExactValue_Count,
@@ -101,7 +101,6 @@ gb_internal ExactValue exact_value_bool(bool b) {
 }
 
 gb_internal ExactValue exact_value_string(String string) {
-	// TODO(bill): Allow for numbers with underscores in them
 	ExactValue result = {ExactValue_String};
 	result.value_string = string;
 	return result;
@@ -702,7 +701,6 @@ gb_internal void match_exact_values(ExactValue *x, ExactValue *y) {
 	compiler_error("match_exact_values: How'd you get here? Invalid ExactValueKind %d", x->kind);
 }
 
-// TODO(bill): Allow for pointer arithmetic? Or are pointer slices good enough?
 gb_internal ExactValue exact_binary_operator_value(TokenKind op, ExactValue x, ExactValue y) {
 	match_exact_values(&x, &y);
 
@@ -943,7 +941,6 @@ gb_internal bool compare_exact_values(TokenKind op, ExactValue x, ExactValue y)
 	case ExactValue_String: {
 		String a = x.value_string;
 		String b = y.value_string;
-		// TODO(bill): gb_memcompare is used because the strings are UTF-8
 		switch (op) {
 		case Token_CmpEq: return a == b;
 		case Token_NotEq: return a != b;

+ 1 - 2
src/llvm_backend_general.cpp

@@ -1861,8 +1861,8 @@ gb_internal LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
 			case Type_SimdVector:
 				return lb_type_internal(m, base);
 
-			// TODO(bill): Deal with this correctly. Can this be named?
 			case Type_Proc:
+				// TODO(bill): Deal with this correctly. Can this be named?
 				return lb_type_internal(m, base);
 
 			case Type_Tuple:
@@ -2835,7 +2835,6 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) {
 	if (USE_SEPARATE_MODULES) {
 		lbModule *other_module = lb_module_of_entity(m->gen, e);
 
-		// TODO(bill): correct this logic
 		bool is_external = other_module != m;
 		if (!is_external) {
 			if (e->code_gen_module != nullptr) {

+ 1 - 4
src/llvm_backend_proc.cpp

@@ -362,7 +362,6 @@ gb_internal lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name
 
 	Type *pt = p->type;
 	lbCallingConventionKind cc_kind = lbCallingConvention_C;
-	// TODO(bill): Clean up this logic
 	if (!is_arch_wasm()) {
 		cc_kind = lb_calling_convention_map[pt->Proc.calling_convention];
 	}
@@ -1702,7 +1701,6 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu
 		lbValue v = lb_build_expr(p, ce->args[0]);
 		Type *t = base_type(v.type);
 		if (is_type_pointer(t)) {
-			// IMPORTANT TODO(bill): Should there be a nil pointer check?
 			v = lb_emit_load(p, v);
 			t = type_deref(t);
 		}
@@ -1730,7 +1728,6 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu
 		lbValue v = lb_build_expr(p, ce->args[0]);
 		Type *t = base_type(v.type);
 		if (is_type_pointer(t)) {
-			// IMPORTANT TODO(bill): Should there be a nil pointer check?
 			v = lb_emit_load(p, v);
 			t = type_deref(t);
 		}
@@ -3144,7 +3141,7 @@ gb_internal lbValue lb_build_call_expr(lbProcedure *p, Ast *expr) {
 
 	lbValue res = lb_build_call_expr_internal(p, expr);
 
-	if (ce->optional_ok_one) { // TODO(bill): Minor hack for #optional_ok procedures
+	if (ce->optional_ok_one) {
 		GB_ASSERT(is_type_tuple(res.type));
 		GB_ASSERT(res.type->Tuple.variables.count == 2);
 		return lb_emit_struct_ev(p, res, 0);

+ 1 - 4
src/llvm_backend_stmt.cpp

@@ -1688,7 +1688,6 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
 			lb_add_entity(p->module, case_entity, ptr);
 			lb_add_debug_local_variable(p, ptr.value, case_entity->type, case_entity->token);
 		} else {
-			// TODO(bill): is the correct expected behaviour?
 			lb_store_type_case_implicit(p, clause, parent_value);
 		}
 
@@ -2014,12 +2013,10 @@ gb_internal void lb_build_if_stmt(lbProcedure *p, Ast *node) {
 	defer (lb_close_scope(p, lbDeferExit_Default, nullptr));
 
 	if (is->init != nullptr) {
-		// TODO(bill): Should this have a separate block to begin with?
-	#if 1
 		lbBlock *init = lb_create_block(p, "if.init");
 		lb_emit_jump(p, init);
 		lb_start_block(p, init);
-	#endif
+
 		lb_build_stmt(p, is->init);
 	}
 	lbBlock *then = lb_create_block(p, "if.then");

+ 0 - 1
src/llvm_backend_type.cpp

@@ -731,7 +731,6 @@ gb_internal void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup
 
 				type_set_offsets(t); // NOTE(bill): Just incase the offsets have not been set yet
 				for (isize source_index = 0; source_index < count; source_index++) {
-					// TODO(bill): Order fields in source order not layout order
 					Entity *f = t->Struct.fields[source_index];
 					lbValue tip = lb_type_info(m, f->type);
 					i64 foffset = 0;

+ 89 - 39
src/main.cpp

@@ -1,5 +1,4 @@
 // #define NO_ARRAY_BOUNDS_CHECK
-
 #include "common.cpp"
 #include "timings.cpp"
 #include "tokenizer.cpp"
@@ -74,6 +73,12 @@ gb_global Timings global_timings = {0};
 #include "linker.cpp"
 
 #if defined(GB_SYSTEM_WINDOWS)
+#define ALLOW_TILDE 1
+#else
+#define ALLOW_TILDE 0
+#endif
+
+#if ALLOW_TILDE
 #include "tilde.cpp"
 #endif
 
@@ -243,8 +248,16 @@ enum BuildFlagKind {
 	BuildFlag_UseSeparateModules,
 	BuildFlag_NoThreadedChecker,
 	BuildFlag_ShowDebugMessages,
+
 	BuildFlag_Vet,
+	BuildFlag_VetShadowing,
+	BuildFlag_VetUnused,
+	BuildFlag_VetUsingStmt,
+	BuildFlag_VetUsingParam,
+	BuildFlag_VetStyle,
+	BuildFlag_VetSemicolon,
 	BuildFlag_VetExtra,
+
 	BuildFlag_IgnoreUnknownAttributes,
 	BuildFlag_ExtraLinkerFlags,
 	BuildFlag_ExtraAssemblerFlags,
@@ -261,7 +274,6 @@ enum BuildFlagKind {
 	BuildFlag_DisallowDo,
 	BuildFlag_DefaultToNilAllocator,
 	BuildFlag_StrictStyle,
-	BuildFlag_StrictStyleInitOnly,
 	BuildFlag_ForeignErrorProcedures,
 	BuildFlag_NoRTTI,
 	BuildFlag_DynamicMapCalls,
@@ -285,9 +297,9 @@ enum BuildFlagKind {
 	BuildFlag_InternalIgnoreLazy,
 	BuildFlag_InternalIgnoreLLVMBuild,
 
-#if defined(GB_SYSTEM_WINDOWS)
 	BuildFlag_Tilde,
 
+#if defined(GB_SYSTEM_WINDOWS)
 	BuildFlag_IgnoreVsSearch,
 	BuildFlag_ResourceFile,
 	BuildFlag_WindowsPdbName,
@@ -422,8 +434,16 @@ gb_internal bool parse_build_flags(Array<String> args) {
 	add_flag(&build_flags, BuildFlag_UseSeparateModules,      str_lit("use-separate-modules"),      BuildFlagParam_None,    Command__does_build);
 	add_flag(&build_flags, BuildFlag_NoThreadedChecker,       str_lit("no-threaded-checker"),       BuildFlagParam_None,    Command__does_check);
 	add_flag(&build_flags, BuildFlag_ShowDebugMessages,       str_lit("show-debug-messages"),       BuildFlagParam_None,    Command_all);
+
 	add_flag(&build_flags, BuildFlag_Vet,                     str_lit("vet"),                       BuildFlagParam_None,    Command__does_check);
+	add_flag(&build_flags, BuildFlag_VetUnused,               str_lit("vet-unused"),                BuildFlagParam_None,    Command__does_check);
+	add_flag(&build_flags, BuildFlag_VetShadowing,            str_lit("vet-shadowing"),             BuildFlagParam_None,    Command__does_check);
+	add_flag(&build_flags, BuildFlag_VetUsingStmt,            str_lit("vet-using-stmt"),            BuildFlagParam_None,    Command__does_check);
+	add_flag(&build_flags, BuildFlag_VetUsingParam,           str_lit("vet-using-param"),           BuildFlagParam_None,    Command__does_check);
+	add_flag(&build_flags, BuildFlag_VetStyle,                str_lit("vet-style"),                 BuildFlagParam_None,    Command__does_check);
+	add_flag(&build_flags, BuildFlag_VetSemicolon,            str_lit("vet-semicolon"),             BuildFlagParam_None,    Command__does_check);
 	add_flag(&build_flags, BuildFlag_VetExtra,                str_lit("vet-extra"),                 BuildFlagParam_None,    Command__does_check);
+
 	add_flag(&build_flags, BuildFlag_IgnoreUnknownAttributes, str_lit("ignore-unknown-attributes"), BuildFlagParam_None,    Command__does_check);
 	add_flag(&build_flags, BuildFlag_ExtraLinkerFlags,        str_lit("extra-linker-flags"),        BuildFlagParam_String,  Command__does_build);
 	add_flag(&build_flags, BuildFlag_ExtraAssemblerFlags,     str_lit("extra-assembler-flags"),     BuildFlagParam_String,  Command__does_build);
@@ -439,7 +459,6 @@ gb_internal bool parse_build_flags(Array<String> args) {
 	add_flag(&build_flags, BuildFlag_DisallowDo,              str_lit("disallow-do"),               BuildFlagParam_None,    Command__does_check);
 	add_flag(&build_flags, BuildFlag_DefaultToNilAllocator,   str_lit("default-to-nil-allocator"),  BuildFlagParam_None,    Command__does_check);
 	add_flag(&build_flags, BuildFlag_StrictStyle,             str_lit("strict-style"),              BuildFlagParam_None,    Command__does_check);
-	add_flag(&build_flags, BuildFlag_StrictStyleInitOnly,     str_lit("strict-style-init-only"),    BuildFlagParam_None,    Command__does_check);
 	add_flag(&build_flags, BuildFlag_ForeignErrorProcedures,  str_lit("foreign-error-procedures"),  BuildFlagParam_None,    Command__does_check);
 
 	add_flag(&build_flags, BuildFlag_NoRTTI,                  str_lit("no-rtti"),                   BuildFlagParam_None,    Command__does_check);
@@ -461,9 +480,11 @@ gb_internal bool parse_build_flags(Array<String> args) {
 	add_flag(&build_flags, BuildFlag_InternalIgnoreLazy,      str_lit("internal-ignore-lazy"),      BuildFlagParam_None,    Command_all);
 	add_flag(&build_flags, BuildFlag_InternalIgnoreLLVMBuild, str_lit("internal-ignore-llvm-build"),BuildFlagParam_None,    Command_all);
 
-#if defined(GB_SYSTEM_WINDOWS)
+#if ALLOW_TILDE
 	add_flag(&build_flags, BuildFlag_Tilde,                   str_lit("tilde"),                     BuildFlagParam_None,    Command__does_build);
+#endif
 
+#if defined(GB_SYSTEM_WINDOWS)
 	add_flag(&build_flags, BuildFlag_IgnoreVsSearch,          str_lit("ignore-vs-search"),          BuildFlagParam_None,    Command__does_build);
 	add_flag(&build_flags, BuildFlag_ResourceFile,            str_lit("resource"),                  BuildFlagParam_String,  Command__does_build);
 	add_flag(&build_flags, BuildFlag_WindowsPdbName,          str_lit("pdb-name"),                  BuildFlagParam_String,  Command__does_build);
@@ -956,13 +977,25 @@ gb_internal bool parse_build_flags(Array<String> args) {
 							build_context.show_debug_messages = true;
 							break;
 						case BuildFlag_Vet:
-							build_context.vet = true;
+							build_context.vet_flags |= VetFlag_All;
 							break;
-						case BuildFlag_VetExtra: {
-							build_context.vet = true;
-							build_context.vet_extra = true;
+
+						case BuildFlag_VetUnused:     build_context.vet_flags |= VetFlag_Unused;     break;
+						case BuildFlag_VetShadowing:  build_context.vet_flags |= VetFlag_Shadowing;  break;
+						case BuildFlag_VetUsingStmt:  build_context.vet_flags |= VetFlag_UsingStmt;  break;
+						case BuildFlag_VetUsingParam: build_context.vet_flags |= VetFlag_UsingParam; break;
+						case BuildFlag_VetStyle:      build_context.vet_flags |= VetFlag_Style;      break;
+						case BuildFlag_VetSemicolon:  build_context.vet_flags |= VetFlag_Semicolon;  break;
+
+						case BuildFlag_VetExtra:
+							build_context.vet_flags = VetFlag_All | VetFlag_Extra;
 							break;
-						}
+
 						case BuildFlag_IgnoreUnknownAttributes:
 							build_context.ignore_unknown_attributes = true;
 							break;
@@ -1050,20 +1083,9 @@ gb_internal bool parse_build_flags(Array<String> args) {
 						case BuildFlag_ForeignErrorProcedures:
 							build_context.ODIN_FOREIGN_ERROR_PROCEDURES = true;
 							break;
-						case BuildFlag_StrictStyle: {
-							if (build_context.strict_style_init_only) {
-								gb_printf_err("-strict-style and -strict-style-init-only cannot be used together\n");
-							}
+						case BuildFlag_StrictStyle:
 							build_context.strict_style = true;
 							break;
-						}
-						case BuildFlag_StrictStyleInitOnly: {
-							if (build_context.strict_style) {
-								gb_printf_err("-strict-style and -strict-style-init-only cannot be used together\n");
-							}
-							build_context.strict_style_init_only = true;
-							break;
-						}
 						case BuildFlag_Short:
 							build_context.cmd_doc_flags |= CmdDocFlag_Short;
 							break;
@@ -1130,11 +1152,11 @@ gb_internal bool parse_build_flags(Array<String> args) {
 						case BuildFlag_InternalIgnoreLLVMBuild:
 							build_context.ignore_llvm_build = true;
 							break;
-					#if defined(GB_SYSTEM_WINDOWS)
 						case BuildFlag_Tilde:
 							build_context.tilde_backend = true;
 							break;
 
+					#if defined(GB_SYSTEM_WINDOWS)
 						case BuildFlag_IgnoreVsSearch: {
 							GB_ASSERT(value.kind == ExactValue_Invalid);
 							build_context.ignore_microsoft_magic = true;
@@ -1170,7 +1192,7 @@ gb_internal bool parse_build_flags(Array<String> args) {
 								if (path_is_directory(path)) {
 									gb_printf_err("Invalid -pdb-name path. %.*s, is a directory.\n", LIT(path));
 									bad_flags = true;
-									break;									
+									break;
 								}
 								// #if defined(GB_SYSTEM_WINDOWS)
 								// 	String ext = path_extension(path);
@@ -1603,6 +1625,10 @@ gb_internal void print_show_help(String const arg0, String const &command) {
 		print_usage_line(2, "Shows an advanced overview of the timings of different stages within the compiler in milliseconds");
 		print_usage_line(0, "");
 
+		print_usage_line(1, "-show-system-calls");
+		print_usage_line(2, "Prints the whole command and arguments for calls to external tools like the linker and assembler");
+		print_usage_line(0, "");
+
 		print_usage_line(1, "-export-timings:<format>");
 		print_usage_line(2, "Export timings to one of a few formats. Requires `-show-timings` or `-show-more-timings`");
 		print_usage_line(2, "Available options:");
@@ -1712,29 +1738,55 @@ gb_internal void print_show_help(String const arg0, String const &command) {
 	}
 
 	if (check) {
-		#if defined(GB_SYSTEM_WINDOWS)
 		print_usage_line(1, "-no-threaded-checker");
 		print_usage_line(2, "Disabled multithreading in the semantic checker stage");
 		print_usage_line(0, "");
-		#else
-		print_usage_line(1, "-threaded-checker");
-		print_usage_line(1, "[EXPERIMENTAL]");
-		print_usage_line(2, "Multithread the semantic checker stage");
-		print_usage_line(0, "");
-		#endif
+	}
 
+	if (check) {
 		print_usage_line(1, "-vet");
 		print_usage_line(2, "Do extra checks on the code");
 		print_usage_line(2, "Extra checks include:");
-		print_usage_line(3, "Variable shadowing within procedures");
-		print_usage_line(3, "Unused declarations");
+		print_usage_line(2, "-vet-unused");
+		print_usage_line(2, "-vet-shadowing");
+		print_usage_line(2, "-vet-using-stmt");
+		print_usage_line(0, "");
+
+		print_usage_line(1, "-vet-unused");
+		print_usage_line(2, "Checks for unused declarations");
+		print_usage_line(0, "");
+
+		print_usage_line(1, "-vet-shadowing");
+		print_usage_line(2, "Checks for variable shadowing within procedures");
+		print_usage_line(0, "");
+
+		print_usage_line(1, "-vet-using-stmt");
+		print_usage_line(2, "Checks for the use of 'using' as a statement");
+		print_usage_line(2, "'using' is considered bad practice outside of immediate refactoring");
+		print_usage_line(0, "");
+
+		print_usage_line(1, "-vet-using-param");
+		print_usage_line(2, "Checks for the use of 'using' on procedure parameters");
+		print_usage_line(2, "'using' is considered bad practice outside of immediate refactoring");
+		print_usage_line(0, "");
+
+		print_usage_line(1, "-vet-style");
+		print_usage_line(2, "Errs on missing trailing commas followed by a newline");
+		print_usage_line(2, "Errs on deprecated syntax");
+		print_usage_line(2, "Does not err on unneeded tokens (unlike -strict-style)");
+		print_usage_line(0, "");
+
+		print_usage_line(1, "-vet-semicolon");
+		print_usage_line(2, "Errs on unneeded semicolons");
 		print_usage_line(0, "");
 
 		print_usage_line(1, "-vet-extra");
 		print_usage_line(2, "Do even more checks than standard vet on the code");
 		print_usage_line(2, "To treat the extra warnings as errors, use -warnings-as-errors");
 		print_usage_line(0, "");
+	}
 
+	if (check) {
 		print_usage_line(1, "-ignore-unknown-attributes");
 		print_usage_line(2, "Ignores unknown attributes");
 		print_usage_line(2, "This can be used with metaprogramming tools");
@@ -1804,10 +1856,8 @@ gb_internal void print_show_help(String const arg0, String const &command) {
 
 		print_usage_line(1, "-strict-style");
 		print_usage_line(2, "Errs on unneeded tokens, such as unneeded semicolons");
-		print_usage_line(0, "");
-
-		print_usage_line(1, "-strict-style-init-only");
-		print_usage_line(2, "Errs on unneeded tokens, such as unneeded semicolons, only on the initial project");
+		print_usage_line(2, "Errs on missing trailing commas followed by a newline");
+		print_usage_line(2, "Errs on deprecated syntax");
 		print_usage_line(0, "");
 
 		print_usage_line(1, "-ignore-warnings");
@@ -2417,7 +2467,7 @@ int main(int arg_count, char const **arg_ptr) {
 		for_array(i, build_context.build_paths) {
 			String build_path = path_to_string(heap_allocator(), build_context.build_paths[i]);
 			debugf("build_paths[%ld]: %.*s\n", i, LIT(build_path));
-		}		
+		}
 	}
 
 	TIME_SECTION("init thread pool");
@@ -2487,7 +2537,7 @@ int main(int arg_count, char const **arg_ptr) {
 		return 0;
 	}
 
-#if defined(GB_SYSTEM_WINDOWS)
+#if ALLOW_TILDE
 	if (build_context.tilde_backend) {
 		LinkerData linker_data = {};
 		MAIN_TIME_SECTION("Tilde Code Gen");
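The reworked help text above documents the split '-vet-*' flags. A sketch of what '-vet-style' and '-vet-semicolon' complain about; the comments mark the offending spots:

package main

import "core:fmt"

Options :: struct {
	width:  int,
	height: int, // -vet-style errs if this trailing comma before the newline is missing
}

main :: proc() {
	o := Options{
		width  = 640,
		height = 480,
	}
	fmt.println(o); // -vet-semicolon errs on this unneeded semicolon
}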

+ 127 - 33
src/parser.cpp

@@ -1,7 +1,21 @@
 #include "parser_pos.cpp"
 
-// #undef at the bottom of this file
-#define ALLOW_NEWLINE (!build_context.strict_style)
+gb_internal u64 ast_file_vet_flags(AstFile *f) {
+	if (f->vet_flags_set) {
+		return f->vet_flags;
+	}
+	return build_context.vet_flags;
+}
+
+gb_internal bool ast_file_vet_style(AstFile *f) {
+	return (ast_file_vet_flags(f) & VetFlag_Style) != 0;
+}
+
+
+gb_internal bool file_allow_newline(AstFile *f) {
+	bool is_strict = build_context.strict_style || ast_file_vet_style(f);
+	return !is_strict;
+}
 
 gb_internal Token token_end_of_line(AstFile *f, Token tok) {
 	u8 const *start = f->tokenizer.start + tok.pos.offset;
@@ -1567,29 +1581,29 @@ gb_internal void assign_removal_flag_to_semicolon(AstFile *f) {
 	Token *prev_token = &f->tokens[f->prev_token_index];
 	Token *curr_token = &f->tokens[f->curr_token_index];
 	GB_ASSERT(prev_token->kind == Token_Semicolon);
-	if (prev_token->string == ";") {
-		bool ok = false;
-		if (curr_token->pos.line > prev_token->pos.line) {
+	if (prev_token->string != ";") {
+		return;
+	}
+	bool ok = false;
+	if (curr_token->pos.line > prev_token->pos.line) {
+		ok = true;
+	} else if (curr_token->pos.line == prev_token->pos.line) {
+		switch (curr_token->kind) {
+		case Token_CloseBrace:
+		case Token_CloseParen:
+		case Token_EOF:
 			ok = true;
-		} else if (curr_token->pos.line == prev_token->pos.line) {
-			switch (curr_token->kind) {
-			case Token_CloseBrace:
-			case Token_CloseParen:
-			case Token_EOF:
-				ok = true;
-				break;
-			}
-		}
-			
-		if (ok) {
-			if (build_context.strict_style) {
-				syntax_error(*prev_token, "Found unneeded semicolon");
-			} else if (build_context.strict_style_init_only && f->pkg->kind == Package_Init) {
-				syntax_error(*prev_token, "Found unneeded semicolon");
-			}
-			prev_token->flags |= TokenFlag_Remove;
+			break;
 		}
 	}
+	if (!ok) {
+		return;
+	}
+
+	if (build_context.strict_style || (ast_file_vet_flags(f) & VetFlag_Semicolon)) {
+		syntax_error(*prev_token, "Found unneeded semicolon");
+	}
+	prev_token->flags |= TokenFlag_Remove;
 }
 
 gb_internal void expect_semicolon(AstFile *f) {
@@ -2748,7 +2762,7 @@ gb_internal Ast *parse_call_expr(AstFile *f, Ast *operand) {
 	isize prev_expr_level = f->expr_level;
 	bool prev_allow_newline = f->allow_newline;
 	f->expr_level = 0;
-	f->allow_newline = ALLOW_NEWLINE;
+	f->allow_newline = file_allow_newline(f);
 
 	open_paren = expect_token(f, Token_OpenParen);
 
@@ -3147,7 +3161,7 @@ gb_internal Ast *parse_expr(AstFile *f, bool lhs) {
 
 gb_internal Array<Ast *> parse_expr_list(AstFile *f, bool lhs) {
 	bool allow_newline = f->allow_newline;
-	f->allow_newline = ALLOW_NEWLINE;
+	f->allow_newline = file_allow_newline(f);
 
 	auto list = array_make<Ast *>(heap_allocator());
 	for (;;) {
@@ -3472,7 +3486,7 @@ gb_internal Ast *parse_results(AstFile *f, bool *diverging) {
 	Ast *list = nullptr;
 	expect_token(f, Token_OpenParen);
 	list = parse_field_list(f, nullptr, FieldFlag_Results, Token_CloseParen, true, false);
-	if (ALLOW_NEWLINE) {
+	if (file_allow_newline(f)) {
 		skip_possible_newline(f);
 	}
 	expect_token_after(f, Token_CloseParen, "parameter list");
@@ -3532,7 +3546,7 @@ gb_internal Ast *parse_proc_type(AstFile *f, Token proc_token) {
 
 	expect_token(f, Token_OpenParen);
 	params = parse_field_list(f, nullptr, FieldFlag_Signature, Token_CloseParen, true, true);
-	if (ALLOW_NEWLINE) {
+	if (file_allow_newline(f)) {
 		skip_possible_newline(f);
 	}
 	expect_token_after(f, Token_CloseParen, "parameter list");
@@ -3754,7 +3768,7 @@ gb_internal bool allow_field_separator(AstFile *f) {
 	}
 	if (token.kind == Token_Semicolon) {
 		bool ok = false;
-		if (ALLOW_NEWLINE && token_is_newline(token)) {
+		if (file_allow_newline(f) && token_is_newline(token)) {
 			TokenKind next = peek_token(f).kind;
 			switch (next) {
 			case Token_CloseBrace:
@@ -3818,7 +3832,7 @@ gb_internal bool check_procedure_name_list(Array<Ast *> const &names) {
 gb_internal Ast *parse_field_list(AstFile *f, isize *name_count_, u32 allowed_flags, TokenKind follow, bool allow_default_parameters, bool allow_typeid_token) {
 	bool prev_allow_newline = f->allow_newline;
 	defer (f->allow_newline = prev_allow_newline);
-	f->allow_newline = ALLOW_NEWLINE;
+	f->allow_newline = file_allow_newline(f);
 
 	Token start_token = f->curr_token;
 
@@ -4954,7 +4968,6 @@ gb_internal bool init_parser(Parser *p) {
 
 gb_internal void destroy_parser(Parser *p) {
 	GB_ASSERT(p != nullptr);
-	// TODO(bill): Fix memory leak
 	for (AstPackage *pkg : p->packages) {
 		for (AstFile *file : pkg->files) {
 			destroy_ast_file(file);
@@ -4998,7 +5011,6 @@ gb_internal WORKER_TASK_PROC(parser_worker_proc) {
 
 
 gb_internal void parser_add_file_to_process(Parser *p, AstPackage *pkg, FileInfo fi, TokenPos pos) {
-	// TODO(bill): Use a better allocator
 	ImportedFile f = {pkg, fi, pos, p->file_to_process_count++};
 	auto wd = gb_alloc_item(permanent_allocator(), ParserWorkerData);
 	wd->parser = p;
@@ -5528,6 +5540,88 @@ gb_internal bool parse_build_tag(Token token_for_pos, String s) {
 	return any_correct;
 }
 
+gb_internal String vet_tag_get_token(String s, String *out) {
+	s = string_trim_whitespace(s);
+	isize n = 0;
+	while (n < s.len) {
+		Rune rune = 0;
+		isize width = utf8_decode(&s[n], s.len-n, &rune);
+		if (n == 0 && rune == '!') {
+		if (n == 0 && rune == '!') {
+			// keep a leading '!' as part of the token; parse_vet_tag treats it as negation
+			isize k = gb_max(gb_max(n, width), 1);
+			*out = substring(s, k, s.len);
+			return substring(s, 0, k);
+		}
+		n += width;
+	}
+	out->len = 0;
+	return s;
+}
+
+
+gb_internal u64 parse_vet_tag(Token token_for_pos, String s) {
+	String const prefix = str_lit("+vet");
+	GB_ASSERT(string_starts_with(s, prefix));
+	s = string_trim_whitespace(substring(s, prefix.len, s.len));
+
+	if (s.len == 0) {
+		return VetFlag_All;
+	}
+
+
+	u64 vet_flags = 0;
+	u64 vet_not_flags = 0;
+
+	while (s.len > 0) {
+		String p = string_trim_whitespace(vet_tag_get_token(s, &s));
+		if (p.len == 0) {
+			break;
+		}
+
+		bool is_notted = false;
+		if (p[0] == '!') {
+			is_notted = true;
+			p = substring(p, 1, p.len);
+			if (p.len == 0) {
+				syntax_error(token_for_pos, "Expected a vet flag name after '!'");
+				return build_context.vet_flags;
+			}
+		}
+
+		u64 flag = get_vet_flag_from_name(p);
+		if (flag != VetFlag_NONE) {
+			if (is_notted) {
+				vet_not_flags |= flag;
+			} else {
+				vet_flags     |= flag;
+			}
+		} else {
+			ERROR_BLOCK();
+			syntax_error(token_for_pos, "Invalid vet flag name: %.*s", LIT(p));
+			error_line("\tExpected one of the following:\n");
+			error_line("\tunused\n");
+			error_line("\tshadowing\n");
+			error_line("\tusing-stmt\n");
+			error_line("\tusing-param\n");
+			error_line("\tstyle\n");
+			error_line("\tsemicolon\n");
+			error_line("\textra\n");
+			return build_context.vet_flags;
+		}
+	}
+
+	if (vet_flags == 0 && vet_not_flags == 0) {
+		return build_context.vet_flags;
+	}
+	if (vet_flags == 0 && vet_not_flags != 0) {
+		return build_context.vet_flags &~ vet_not_flags;
+	}
+	if (vet_flags != 0 && vet_not_flags == 0) {
+		return vet_flags;
+	}
+	GB_ASSERT(vet_flags != 0 && vet_not_flags != 0);
+	return vet_flags &~ vet_not_flags;
+}
+
 gb_internal String dir_from_path(String path) {
 	String base_dir = path;
 	for (isize i = path.len-1; i >= 0; i--) {
@@ -5679,6 +5773,9 @@ gb_internal bool parse_file(Parser *p, AstFile *f) {
 						if (!parse_build_tag(tok, lc)) {
 							return false;
 						}
+					} else if (string_starts_with(lc, str_lit("+vet"))) {
+						f->vet_flags = parse_vet_tag(tok, lc);
+						f->vet_flags_set = true;
 					} else if (string_starts_with(lc, str_lit("+ignore"))) {
 							return false;
 					} else if (string_starts_with(lc, str_lit("+private"))) {
@@ -5920,6 +6017,3 @@ gb_internal ParseFileError parse_packages(Parser *p, String init_filename) {
 	return ParseFile_None;
 }
 
-
-
-#undef ALLOW_NEWLINE
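The '+vet' file tag parsed above lets a single file override the command-line vet flags; names go through 'get_vet_flag_from_name' and a leading '!' removes a flag from the inherited set. A sketch of a file that opts out of the using-stmt check while keeping whatever other '-vet-*' flags were passed (package and type names are made up):

//+vet !using-stmt
package legacy_code

Player :: struct {
	pos: [2]f32,
}

move :: proc(p: ^Player, delta: [2]f32) {
	using p // permitted in this file even when building with -vet
	pos += delta
}

A bare '//+vet' tag instead enables the standard set (unused, shadowing, using-stmt) for that file, regardless of the command line.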

+ 2 - 0
src/parser.hpp

@@ -104,6 +104,8 @@ struct AstFile {
 	Token        package_token;
 	String       package_name;
 
+	u64          vet_flags;
+	bool         vet_flags_set;
 
 	// >= 0: In Expression
 	// <  0: In Control Clause

+ 2 - 2
src/tokenizer.cpp

@@ -696,8 +696,8 @@ gb_internal void tokenizer_get_token(Tokenizer *t, Token *token, int repeat=0) {
 			if (entry->kind != Token_Invalid && entry->hash == hash) {
 				if (str_eq(entry->text, token->string)) {
 					token->kind = entry->kind;
-					if (token->kind == Token_not_in && entry->text == "notin") {
-						syntax_warning(*token, "'notin' is deprecated in favour of 'not_in'");
+					if (token->kind == Token_not_in && entry->text.len == 5) {
+						syntax_error(*token, "Did you mean 'not_in'?");
 					}
 				}
 			}
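
For reference, a minimal sketch of the spelling the tokenizer now insists on (the map and key below are hypothetical, purely for illustration); writing 'notin' at this spot is now reported with "Did you mean 'not_in'?":

	package example

	import "core:fmt"

	main :: proc() {
		m := map[string]int{"a" = 1}
		defer delete(m)
		if "key" not_in m {   // 'not_in' is the accepted spelling; 'notin' is a syntax error
			fmt.println("missing key")
		}
	}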

+ 14 - 10
src/types.cpp

@@ -143,6 +143,7 @@ struct TypeStruct {
 	Type *          soa_elem;
 	i32             soa_count;
 	StructSoaKind   soa_kind;
+	BlockingMutex   mutex; // for setting offsets
 
 	bool            is_polymorphic;
 	bool            are_offsets_set             : 1;
@@ -244,6 +245,7 @@ struct TypeProc {
 	TYPE_KIND(Tuple, struct {                                 \
 		Slice<Entity *> variables; /* Entity_Variable */  \
 		i64 *           offsets;                          \
+		BlockingMutex   mutex; /* for setting offsets */  \
 		bool            are_offsets_being_processed;      \
 		bool            are_offsets_set;                  \
 		bool            is_packed;                        \
@@ -822,6 +824,9 @@ gb_internal void type_path_pop(TypePath *tp) {
 #define FAILURE_ALIGNMENT 0
 
 gb_internal bool type_ptr_set_update(PtrSet<Type *> *s, Type *t) {
+	if (t == nullptr) {
+		return true;
+	}
 	if (ptr_set_exists(s, t)) {
 		return true;
 	}
@@ -830,13 +835,17 @@ gb_internal bool type_ptr_set_update(PtrSet<Type *> *s, Type *t) {
 }
 
 gb_internal bool type_ptr_set_exists(PtrSet<Type *> *s, Type *t) {
+	if (t == nullptr) {
+		return true;
+	}
+
 	if (ptr_set_exists(s, t)) {
 		return true;
 	}
 
 	// TODO(bill, 2019-10-05): This is very slow and it's probably a lot
 	// faster to cache types correctly
-	for (Type *f : *s) {
+	for (Type *f : *s) if (f->kind == t->kind) {
 		if (are_types_identical(t, f)) {
 			ptr_set_add(s, t);
 			return true;
@@ -989,7 +998,7 @@ gb_internal Type *alloc_type_enumerated_array(Type *elem, Type *index, ExactValu
 
 gb_internal Type *alloc_type_slice(Type *elem) {
 	Type *t = alloc_type(Type_Slice);
-	t->Array.elem = elem;
+	t->Slice.elem = elem;
 	return t;
 }
 
@@ -2667,7 +2676,6 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
 		    x->Struct.soa_kind == y->Struct.soa_kind &&
 		    x->Struct.soa_count == y->Struct.soa_count &&
 		    are_types_identical(x->Struct.soa_elem, y->Struct.soa_elem)) {
-			// TODO(bill); Fix the custom alignment rule
 			for_array(i, x->Struct.fields) {
 				Entity *xf = x->Struct.fields[i];
 				Entity *yf = y->Struct.fields[i];
@@ -2808,7 +2816,6 @@ gb_internal i64 union_tag_size(Type *u) {
 		return 0;
 	}
 
-	// TODO(bill): Is this an okay approach?
 	i64 max_align = 1;
 
 	if (u->Union.variants.count < 1ull<<8) {
@@ -2818,7 +2825,7 @@ gb_internal i64 union_tag_size(Type *u) {
 	} else if (u->Union.variants.count < 1ull<<32) {
 		max_align = 4;
 	} else {
-		GB_PANIC("how many variants do you have?!");
+		compiler_error("how many variants do you have?! %lld", cast(long long)u->Union.variants.count);
 	}
 
 	for_array(i, u->Union.variants) {
@@ -3137,8 +3144,6 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
 		switch (type->Basic.kind) {
 		case Basic_any: {
 		#if 1
-			// IMPORTANT TODO(bill): Should these members be available to should I only allow them with
-			// `Raw_Any` type?
 			String data_str = str_lit("data");
 			String id_str = str_lit("id");
 			gb_local_persist Entity *entity__any_data = alloc_entity_field(nullptr, make_token_ident(data_str), t_rawptr, false, 0);
@@ -3672,10 +3677,9 @@ gb_internal i64 *type_set_offsets_of(Slice<Entity *> const &fields, bool is_pack
 }
 
 gb_internal bool type_set_offsets(Type *t) {
-	MUTEX_GUARD(&g_type_mutex); // TODO(bill): only per struct
-
 	t = base_type(t);
 	if (t->kind == Type_Struct) {
+		MUTEX_GUARD(&t->Struct.mutex);
 		if (!t->Struct.are_offsets_set) {
 			t->Struct.are_offsets_being_processed = true;
 			t->Struct.offsets = type_set_offsets_of(t->Struct.fields, t->Struct.is_packed, t->Struct.is_raw_union);
@@ -3684,6 +3688,7 @@ gb_internal bool type_set_offsets(Type *t) {
 			return true;
 		}
 	} else if (is_type_tuple(t)) {
+		MUTEX_GUARD(&t->Tuple.mutex);
 		if (!t->Tuple.are_offsets_set) {
 			t->Tuple.are_offsets_being_processed = true;
 			t->Tuple.offsets = type_set_offsets_of(t->Tuple.variables, t->Tuple.is_packed, false);
@@ -3858,7 +3863,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) {
 					max = size;
 				}
 			}
-			// TODO(bill): Is this how it should work?
 			return align_formula(max, align);
 		} else {
 			i64 count = 0, size = 0, align = 0;

+ 14 - 20
tests/core/encoding/hxa/test_core_hxa.odin

@@ -21,16 +21,13 @@ main :: proc() {
 
 @test
 test_read :: proc(t: ^testing.T) {
-
-	using hxa
-
 	filename := tc.get_data_path(t, TEAPOT_PATH)
 	defer delete(filename)
 
-	file, err := read_from_file(filename)
+	file, err := hxa.read_from_file(filename)
 	e :: hxa.Read_Error.None
 	tc.expect(t, err == e, fmt.tprintf("%v: read_from_file(%v) -> %v != %v", #procedure, filename, err, e))
-	defer file_destroy(file)
+	defer hxa.file_destroy(file)
 
 	/* Header */
 	tc.expect(t, file.magic_number == 0x417848, fmt.tprintf("%v: file.magic_number %v != %v",
@@ -134,38 +131,35 @@ test_read :: proc(t: ^testing.T) {
 
 @test
 test_write :: proc(t: ^testing.T) {
-
-	using hxa
-
-	n1 :Node
+	n1: hxa.Node
 
 	n1_m1_value := []f64le{0.4, -1.23, 2341.6, -333.333}
-	n1_m1 := Meta{"m1", n1_m1_value}
+	n1_m1 := hxa.Meta{"m1", n1_m1_value}
 
-	n1.meta_data = []Meta{n1_m1}
+	n1.meta_data = []hxa.Meta{n1_m1}
 
-	n1_l1 := Layer{"l1", 2, []f32le{32.1, -41.3}}
-	n1_l2 := Layer{"l2", 3, []f64le{0.64, 1.64, -2.64}}
+	n1_l1 := hxa.Layer{"l1", 2, []f32le{32.1, -41.3}}
+	n1_l2 := hxa.Layer{"l2", 3, []f64le{0.64, 1.64, -2.64}}
 
-	n1_content := Node_Image{Image_Type.Image_1D, [3]u32le{1, 1, 2}, Layer_Stack{n1_l1, n1_l2}} 
+	n1_content := hxa.Node_Image{.Image_1D, [3]u32le{1, 1, 2}, hxa.Layer_Stack{n1_l1, n1_l2}}
 
 	n1.content = n1_content
 
-	w_file :File
-	w_file.nodes = []Node{n1}
+	w_file: hxa.File
+	w_file.nodes = []hxa.Node{n1}
 
-	required_size := required_write_size(w_file)
+	required_size := hxa.required_write_size(w_file)
 	buf := make([]u8, required_size)
 
-	n, write_err := write(buf, w_file)
+	n, write_err := hxa.write(buf, w_file)
 	write_e :: hxa.Write_Error.None
 	tc.expect(t, write_err == write_e, fmt.tprintf("%v: write_err %v != %v", #procedure, write_err, write_e))
 	tc.expect(t, n == required_size, fmt.tprintf("%v: n %v != %v", #procedure, n, required_size))
 
-	file, read_err := read(buf)
+	file, read_err := hxa.read(buf)
 	read_e :: hxa.Read_Error.None
 	tc.expect(t, read_err == read_e, fmt.tprintf("%v: read_err %v != %v", #procedure, read_err, read_e))
-	defer file_destroy(file)
+	defer hxa.file_destroy(file)
 
 	delete(buf)
 

+ 33 - 36
tests/core/encoding/xml/test_core_xml.odin

@@ -47,7 +47,7 @@ TESTS :: []TEST{
 			},
 			expected_doctype = "恥ずべきフクロウ",
 		},
-		crc32     = 0x30d82264,
+		crc32     = 0xe9b62f03,
 	},
 
 	{
@@ -62,7 +62,7 @@ TESTS :: []TEST{
 			},
 			expected_doctype = "恥ずべきフクロウ",
 		},
-		crc32     = 0xad31d8e8,
+		crc32     = 0x9c2643ed,
 	},
 
 	{
@@ -77,7 +77,7 @@ TESTS :: []TEST{
 			},
 			expected_doctype = "TS",
 		},
-		crc32     = 0x7bce2630,
+		crc32     = 0x859b7443,
 	},
 
 	{
@@ -92,7 +92,7 @@ TESTS :: []TEST{
 			},
 			expected_doctype = "xliff",
 		},
-		crc32     = 0x43f19d61,
+		crc32     = 0x3deaf329,
 	},
 
 	{
@@ -107,7 +107,7 @@ TESTS :: []TEST{
 			},
 			expected_doctype = "xliff",
 		},
-		crc32     = 0x961e7635,
+		crc32     = 0x0c55e287,
 	},
 
 	{
@@ -118,7 +118,7 @@ TESTS :: []TEST{
 			},
 			expected_doctype = "html",
 		},
-		crc32     = 0x573c1033,
+		crc32     = 0x05373317,
 	},
 
 	{
@@ -129,7 +129,7 @@ TESTS :: []TEST{
 			},
 			expected_doctype = "html",
 		},
-		crc32     = 0x82588917,
+		crc32     = 0x3b6d4a90,
 	},
 
 	{
@@ -140,7 +140,7 @@ TESTS :: []TEST{
 			},
 			expected_doctype = "html",
 		},
-		crc32     = 0x5e74d8a6,
+		crc32     = 0x5be2ffdc,
 	},
 
 	/*
@@ -170,7 +170,7 @@ TESTS :: []TEST{
 			expected_doctype = "",
 		},
 		err       = .None,
-		crc32     = 0xcaa042b9,
+		crc32     = 0x420dbac5,
 	},
 }
 
@@ -214,43 +214,40 @@ doc_to_string :: proc(doc: ^xml.Document) -> (result: string) {
 	*/
 	print :: proc(writer: io.Writer, doc: ^xml.Document) -> (written: int, err: io.Error) {
 		if doc == nil { return }
-		using fmt
 
-		written += wprintf(writer, "[XML Prolog]\n")
+		written += fmt.wprintf(writer, "[XML Prolog]\n")
 
 		for attr in doc.prologue {
-			written += wprintf(writer, "\t%v: %v\n", attr.key, attr.val)
+			written += fmt.wprintf(writer, "\t%v: %v\n", attr.key, attr.val)
 		}
 
-		written += wprintf(writer, "[Encoding] %v\n", doc.encoding)
+		written += fmt.wprintf(writer, "[Encoding] %v\n", doc.encoding)
 
 		if len(doc.doctype.ident) > 0 {
-			written += wprintf(writer, "[DOCTYPE]  %v\n", doc.doctype.ident)
+			written += fmt.wprintf(writer, "[DOCTYPE]  %v\n", doc.doctype.ident)
 
 			if len(doc.doctype.rest) > 0 {
-			 	wprintf(writer, "\t%v\n", doc.doctype.rest)
+			 	fmt.wprintf(writer, "\t%v\n", doc.doctype.rest)
 			}
 		}
 
 		for comment in doc.comments {
-			written += wprintf(writer, "[Pre-root comment]  %v\n", comment)
+			written += fmt.wprintf(writer, "[Pre-root comment]  %v\n", comment)
 		}
 
 		if doc.element_count > 0 {
-		 	wprintln(writer, " --- ")
+		 	fmt.wprintln(writer, " --- ")
 		 	print_element(writer, doc, 0)
-		 	wprintln(writer, " --- ")
+		 	fmt.wprintln(writer, " --- ")
 		 }
 
 		return written, .None
 	}
 
 	print_element :: proc(writer: io.Writer, doc: ^xml.Document, element_id: xml.Element_ID, indent := 0) -> (written: int, err: io.Error) {
-		using fmt
-
 		tab :: proc(writer: io.Writer, indent: int) {
 			for _ in 0..=indent {
-				wprintf(writer, "\t")
+				fmt.wprintf(writer, "\t")
 			}
 		}
 
@@ -259,22 +256,24 @@ doc_to_string :: proc(doc: ^xml.Document) -> (result: string) {
 		element := doc.elements[element_id]
 
 		if element.kind == .Element {
-			wprintf(writer, "<%v>\n", element.ident)
-			if len(element.value) > 0 {
-				tab(writer, indent + 1)
-				wprintf(writer, "[Value] %v\n", element.value)
+			fmt.wprintf(writer, "<%v>\n", element.ident)
+
+			for value in element.value {
+				switch v in value {
+				case string:
+					tab(writer, indent + 1)
+					fmt.wprintf(writer, "[Value] %v\n", v)
+				case xml.Element_ID:
+					print_element(writer, doc, v, indent + 1)
+				}
 			}
 
 			for attr in element.attribs {
 				tab(writer, indent + 1)
-				wprintf(writer, "[Attr] %v: %v\n", attr.key, attr.val)
-			}
-
-			for child in element.children {
-				print_element(writer, doc, child, indent + 1)
+				fmt.wprintf(writer, "[Attr] %v: %v\n", attr.key, attr.val)
 			}
 		} else if element.kind == .Comment {
-			wprintf(writer, "[COMMENT] %v\n", element.value)
+			fmt.wprintf(writer, "[COMMENT] %v\n", element.value)
 		}
 
 		return written, .None
@@ -289,8 +288,6 @@ doc_to_string :: proc(doc: ^xml.Document) -> (result: string) {
 
 @test
 run_tests :: proc(t: ^testing.T) {
-	using fmt
-
 	for test in TESTS {
 		path := test_file_path(test.filename)
 		log(t, fmt.tprintf("Trying to parse %v", path))
@@ -305,11 +302,11 @@ run_tests :: proc(t: ^testing.T) {
 		crc32 := hash.crc32(tree_bytes)
 
 		failed := err != test.err
-		err_msg := tprintf("Expected return value %v, got %v", test.err, err)
+		err_msg := fmt.tprintf("Expected return value %v, got %v", test.err, err)
 		expect(t, err == test.err, err_msg)
 
 		failed |= crc32 != test.crc32
-		err_msg  = tprintf("Expected CRC 0x%08x, got 0x%08x, with options %v", test.crc32, crc32, test.options)
+		err_msg  = fmt.tprintf("Expected CRC 0x%08x, got 0x%08x, with options %v", test.crc32, crc32, test.options)
 		expect(t, crc32 == test.crc32, err_msg)
 
 		if failed {
@@ -317,7 +314,7 @@ run_tests :: proc(t: ^testing.T) {
 				Don't fully print big trees.
 			*/
 			tree_string = tree_string[:min(2_048, len(tree_string))]
-			println(tree_string)
+			fmt.println(tree_string)
 		}
 	}
 }

+ 4 - 10
tests/core/math/linalg/glsl/test_linalg_glsl_math.odin

@@ -22,9 +22,6 @@ main :: proc() {
 
 @test
 test_fract_f32 :: proc(t: ^testing.T) {
-
-	using math
-
 	r: f32
 
 	Datum :: struct {
@@ -35,8 +32,8 @@ test_fract_f32 :: proc(t: ^testing.T) {
 	@static data := []Datum{
 		{ 0, 10.5, 0.5 }, // Issue #1574 fract in linalg/glm is broken
 		{ 1, -10.5, -0.5 },
-		{ 2, F32_MIN, F32_MIN }, // 0x1p-126
-		{ 3, -F32_MIN, -F32_MIN },
+		{ 2, math.F32_MIN, math.F32_MIN }, // 0x1p-126
+		{ 3, -math.F32_MIN, -math.F32_MIN },
 		{ 4, 0.0, 0.0 },
 		{ 5, -0.0, -0.0 },
 		{ 6, 1, 0.0 },
@@ -54,9 +51,6 @@ test_fract_f32 :: proc(t: ^testing.T) {
 
 @test
 test_fract_f64 :: proc(t: ^testing.T) {
-
-	using math
-
 	r: f64
 
 	Datum :: struct {
@@ -67,8 +61,8 @@ test_fract_f64 :: proc(t: ^testing.T) {
 	@static data := []Datum{
 		{ 0, 10.5, 0.5 }, // Issue #1574 fract in linalg/glm is broken
 		{ 1, -10.5, -0.5 },
-		{ 2, F64_MIN, F64_MIN }, // 0x1p-1022
-		{ 3, -F64_MIN, -F64_MIN },
+		{ 2, math.F64_MIN, math.F64_MIN }, // 0x1p-1022
+		{ 3, -math.F64_MIN, -math.F64_MIN },
 		{ 4, 0.0, 0.0 },
 		{ 5, -0.0, -0.0 },
 		{ 6, 1, 0.0 },

+ 73 - 94
tests/core/math/test_core_math.odin

@@ -43,11 +43,7 @@ main :: proc() {
 
 @test
 test_classify_f16 :: proc(t: ^testing.T) {
-
-	using math
-	using Float_Class
-
-	r: Float_Class
+	r: math.Float_Class
 
 	Datum :: struct {
 		i: int,
@@ -55,38 +51,34 @@ test_classify_f16 :: proc(t: ^testing.T) {
 		e: math.Float_Class,
 	}
 	@static data := []Datum{
-		{ 0, 1.2, Normal },
-		{ 1, 0h0001, Subnormal },
-		{ 2, 0.0, Zero },
-		{ 3, -0.0, Neg_Zero },
-		{ 4, SNAN_F16, NaN },
-		{ 5, QNAN_F16, NaN },
-		{ 6, INF_F16, Inf },
-		{ 7, NEG_INF_F16, Neg_Inf },
+		{ 0, 1.2, .Normal },
+		{ 1, 0h0001, .Subnormal },
+		{ 2, 0.0, .Zero },
+		{ 3, -0.0, .Neg_Zero },
+		{ 4, math.SNAN_F16, .NaN },
+		{ 5, math.QNAN_F16, .NaN },
+		{ 6, math.INF_F16, .Inf },
+		{ 7, math.NEG_INF_F16, .Neg_Inf },
 	}
 
 	for d, i in data {
 		assert(i == d.i)
-		r = classify_f16(d.v)
+		r = math.classify_f16(d.v)
 		tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %v != %v", i, #procedure, d.v, r, d.e))
 	}
 
 	/* Check all subnormals (exponent 0, 10-bit significand non-zero) */
-	for i :u16 = 1; i < 0x400; i += 1 {
-		v :f16 = transmute(f16)i
-		r = classify_f16(v)
-		e :Float_Class: Subnormal
+	for i in u16(1)..<0x400 {
+		v := transmute(f16)i
+		r = math.classify_f16(v)
+		e :: math.Float_Class.Subnormal
 		tc.expect(t, r == e, fmt.tprintf("i:%d %s(%h) -> %v != %v", i, #procedure, v, r, e))
 	}
 }
 
 @test
 test_classify_f32 :: proc(t: ^testing.T) {
-
-	using math
-	using Float_Class
-
-	r: Float_Class
+	r: math.Float_Class
 
 	Datum :: struct {
 		i: int,
@@ -94,30 +86,26 @@ test_classify_f32 :: proc(t: ^testing.T) {
 		e: math.Float_Class,
 	}
 	@static data := []Datum{
-		{ 0, 1.2, Normal },
-		{ 1, 0h0000_0001, Subnormal },
-		{ 2, 0.0, Zero },
-		{ 3, -0.0, Neg_Zero },
-		{ 4, SNAN_F32, NaN },
-		{ 5, QNAN_F32, NaN },
-		{ 6, INF_F32, Inf },
-		{ 7, NEG_INF_F32, Neg_Inf },
+		{ 0, 1.2, .Normal },
+		{ 1, 0h0000_0001, .Subnormal },
+		{ 2, 0.0, .Zero },
+		{ 3, -0.0, .Neg_Zero },
+		{ 4, math.SNAN_F32, .NaN },
+		{ 5, math.QNAN_F32, .NaN },
+		{ 6, math.INF_F32, .Inf },
+		{ 7, math.NEG_INF_F32, .Neg_Inf },
 	}
 
 	for d, i in data {
 		assert(i == d.i)
-		r = classify_f32(d.v)
+		r = math.classify_f32(d.v)
 		tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %v != %v", i, #procedure, d.v, r, d.e))
 	}
 }
 
 @test
 test_classify_f64 :: proc(t: ^testing.T) {
-
-	using math
-	using Float_Class
-
-	r: Float_Class
+	r: math.Float_Class
 
 	Datum :: struct {
 		i: int,
@@ -125,28 +113,25 @@ test_classify_f64 :: proc(t: ^testing.T) {
 		e: math.Float_Class,
 	}
 	@static data := []Datum{
-		{ 0, 1.2, Normal },
-		{ 1, 0h0000_0000_0000_0001, Subnormal },
-		{ 2, 0.0, Zero },
-		{ 3, -0.0, Neg_Zero },
-		{ 4, SNAN_F64, NaN },
-		{ 5, QNAN_F64, NaN },
-		{ 6, INF_F64, Inf },
-		{ 7, NEG_INF_F64, Neg_Inf },
+		{ 0, 1.2, .Normal },
+		{ 1, 0h0000_0000_0000_0001, .Subnormal },
+		{ 2, 0.0, .Zero },
+		{ 3, -0.0, .Neg_Zero },
+		{ 4, math.SNAN_F64, .NaN },
+		{ 5, math.QNAN_F64, .NaN },
+		{ 6, math.INF_F64, .Inf },
+		{ 7, math.NEG_INF_F64, .Neg_Inf },
 	}
 
 	for d, i in data {
 		assert(i == d.i)
-		r = classify_f64(d.v)
+		r = math.classify_f64(d.v)
 		tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %v != %v", i, #procedure, d.v, r, d.e))
 	}
 }
 
 @test
 test_trunc_f16 :: proc(t: ^testing.T) {
-
-	using math
-
 	r, v: f16
 
 	Datum :: struct {
@@ -158,16 +143,16 @@ test_trunc_f16 :: proc(t: ^testing.T) {
 		{ 0, 10.5, 10 }, // Issue #1574 fract in linalg/glm is broken
 		{ 1, -10.5, -10 },
 
-		{ 2, F16_MAX, F16_MAX },
-		{ 3, -F16_MAX, -F16_MAX },
-		{ 4, F16_MIN, 0.0 },
-		{ 5, -F16_MIN, -0.0 },
+		{ 2, math.F16_MAX, math.F16_MAX },
+		{ 3, -math.F16_MAX, -math.F16_MAX },
+		{ 4, math.F16_MIN, 0.0 },
+		{ 5, -math.F16_MIN, -0.0 },
 		{ 6, 0.0, 0.0 },
 		{ 7, -0.0, -0.0 },
 		{ 8, 1, 1 },
 		{ 9, -1, -1 },
-		{ 10, INF_F16, INF_F16 },
-		{ 11, NEG_INF_F16, NEG_INF_F16 },
+		{ 10, math.INF_F16, math.INF_F16 },
+		{ 11, math.NEG_INF_F16, math.NEG_INF_F16 },
 
 		/* From https://en.wikipedia.org/wiki/Half-precision_floating-point_format */
 		{ 12, 0h3C01, 1 }, // 0x1.004p+0 (smallest > 1)
@@ -185,24 +170,21 @@ test_trunc_f16 :: proc(t: ^testing.T) {
 
 	for d, i in data {
 		assert(i == d.i)
-		r = trunc_f16(d.v)
+		r = math.trunc_f16(d.v)
 		tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %h != %h", i, #procedure, d.v, r, d.e))
 	}
 
-	v = SNAN_F16
-	r = trunc_f16(v)
-	tc.expect(t, is_nan_f16(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+	v = math.SNAN_F16
+	r = math.trunc_f16(v)
+	tc.expect(t, math.is_nan_f16(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
 
-	v = QNAN_F16
-	r = trunc_f16(v)
-	tc.expect(t, is_nan_f16(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+	v = math.QNAN_F16
+	r = math.trunc_f16(v)
+	tc.expect(t, math.is_nan_f16(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
 }
 
 @test
 test_trunc_f32 :: proc(t: ^testing.T) {
-
-	using math
-
 	r, v: f32
 
 	Datum :: struct {
@@ -214,16 +196,16 @@ test_trunc_f32 :: proc(t: ^testing.T) {
 		{ 0, 10.5, 10 }, // Issue #1574 fract in linalg/glm is broken
 		{ 1, -10.5, -10 },
 
-		{ 2, F32_MAX, F32_MAX },
-		{ 3, -F32_MAX, -F32_MAX },
-		{ 4, F32_MIN, 0.0 },
-		{ 5, -F32_MIN, -0.0 },
+		{ 2, math.F32_MAX, math.F32_MAX },
+		{ 3, -math.F32_MAX, -math.F32_MAX },
+		{ 4, math.F32_MIN, 0.0 },
+		{ 5, -math.F32_MIN, -0.0 },
 		{ 6, 0.0, 0.0 },
 		{ 7, -0.0, -0.0 },
 		{ 8, 1, 1 },
 		{ 9, -1, -1 },
-		{ 10, INF_F32, INF_F32 },
-		{ 11, NEG_INF_F32, NEG_INF_F32 },
+		{ 10, math.INF_F32, math.INF_F32 },
+		{ 11, math.NEG_INF_F32, math.NEG_INF_F32 },
 
 		/* From https://en.wikipedia.org/wiki/Single-precision_floating-point_format */
 		{ 12, 0h3F80_0001, 1 }, // 0x1.000002p+0 (smallest > 1)
@@ -250,24 +232,21 @@ test_trunc_f32 :: proc(t: ^testing.T) {
 
 	for d, i in data {
 		assert(i == d.i)
-		r = trunc_f32(d.v)
+		r = math.trunc_f32(d.v)
 		tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %h != %h", i, #procedure, d.v, r, d.e))
 	}
 
-	v = SNAN_F32
-	r = trunc_f32(v)
-	tc.expect(t, is_nan_f32(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+	v = math.SNAN_F32
+	r = math.trunc_f32(v)
+	tc.expect(t, math.is_nan_f32(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
 
-	v = QNAN_F32
-	r = trunc_f32(v)
-	tc.expect(t, is_nan_f32(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+	v = math.QNAN_F32
+	r = math.trunc_f32(v)
+	tc.expect(t, math.is_nan_f32(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
 }
 
 @test
 test_trunc_f64 :: proc(t: ^testing.T) {
-
-	using math
-
 	r, v: f64
 
 	Datum :: struct {
@@ -279,16 +258,16 @@ test_trunc_f64 :: proc(t: ^testing.T) {
 		{ 0, 10.5, 10 }, // Issue #1574 fract in linalg/glm is broken
 		{ 1, -10.5, -10 },
 
-		{ 2, F64_MAX, F64_MAX },
-		{ 3, -F64_MAX, -F64_MAX },
-		{ 4, F64_MIN, 0.0 },
-		{ 5, -F64_MIN, -0.0 },
+		{ 2, math.F64_MAX, math.F64_MAX },
+		{ 3, -math.F64_MAX, -math.F64_MAX },
+		{ 4, math.F64_MIN, 0.0 },
+		{ 5, -math.F64_MIN, -0.0 },
 		{ 6, 0.0, 0.0 },
 		{ 7, -0.0, -0.0 },
 		{ 8, 1, 1 },
 		{ 9, -1, -1 },
-		{ 10, INF_F64, INF_F64 },
-		{ 11, NEG_INF_F64, NEG_INF_F64 },
+		{ 10, math.INF_F64, math.INF_F64 },
+		{ 11, math.NEG_INF_F64, math.NEG_INF_F64 },
 
 		/* From https://en.wikipedia.org/wiki/Double-precision_floating-point_format */
 		{ 12, 0h3FF0_0000_0000_0001, 1 }, // 0x1.0000000000001p+0 (smallest > 1)
@@ -315,17 +294,17 @@ test_trunc_f64 :: proc(t: ^testing.T) {
 
 	for d, i in data {
 		assert(i == d.i)
-		r = trunc_f64(d.v)
+		r = math.trunc_f64(d.v)
 		tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %h != %h", i, #procedure, d.v, r, d.e))
 	}
 
-	v = SNAN_F64
-	r = trunc_f64(v)
-	tc.expect(t, is_nan_f64(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+	v = math.SNAN_F64
+	r = math.trunc_f64(v)
+	tc.expect(t, math.is_nan_f64(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
 
-	v = QNAN_F64
-	r = trunc_f64(v)
-	tc.expect(t, is_nan_f64(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+	v = math.QNAN_F64
+	r = math.trunc_f64(v)
+	tc.expect(t, math.is_nan_f64(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
 }
 
 

+ 6 - 12
tests/core/path/filepath/test_core_filepath.odin

@@ -22,9 +22,6 @@ main :: proc() {
 
 @test
 test_split_list_windows :: proc(t: ^testing.T) {
-
-	using filepath
-
 	Datum :: struct {
 		i: int,
 		v: string,
@@ -43,7 +40,7 @@ test_split_list_windows :: proc(t: ^testing.T) {
 
 	for d, i in data {
 		assert(i == d.i, fmt.tprintf("wrong data index: i %d != d.i %d\n", i, d.i))
-		r := split_list(d.v)
+		r := filepath.split_list(d.v)
 		defer delete(r)
 		tc.expect(t, len(r) == len(d.e), fmt.tprintf("i:%d %s(%s) len(r) %d != len(d.e) %d",
 													 i, #procedure, d.v, len(r), len(d.e)))
@@ -57,12 +54,12 @@ test_split_list_windows :: proc(t: ^testing.T) {
 
 	{
 		v := ""
-		r := split_list(v)
+		r := filepath.split_list(v)
 		tc.expect(t, r == nil, fmt.tprintf("%s(%s) -> %v != nil", #procedure, v, r))
 	}
 	{
 		v := "a"
-		r := split_list(v)
+		r := filepath.split_list(v)
 		defer delete(r)
 		tc.expect(t, len(r) == 1, fmt.tprintf("%s(%s) len(r) %d != 1", #procedure, v, len(r)))
 		if len(r) == 1 {
@@ -73,9 +70,6 @@ test_split_list_windows :: proc(t: ^testing.T) {
 
 @test
 test_split_list_unix :: proc(t: ^testing.T) {
-
-	using filepath
-
 	Datum :: struct {
 		i: int,
 		v: string,
@@ -94,7 +88,7 @@ test_split_list_unix :: proc(t: ^testing.T) {
 
 	for d, i in data {
 		assert(i == d.i, fmt.tprintf("wrong data index: i %d != d.i %d\n", i, d.i))
-		r := split_list(d.v)
+		r := filepath.split_list(d.v)
 		defer delete(r)
 		tc.expect(t, len(r) == len(d.e), fmt.tprintf("i:%d %s(%s) len(r) %d != len(d.e) %d",
 													 i, #procedure, d.v, len(r), len(d.e)))
@@ -108,12 +102,12 @@ test_split_list_unix :: proc(t: ^testing.T) {
 
 	{
 		v := ""
-		r := split_list(v)
+		r := filepath.split_list(v)
 		tc.expect(t, r == nil, fmt.tprintf("%s(%s) -> %v != nil", #procedure, v, r))
 	}
 	{
 		v := "a"
-		r := split_list(v)
+		r := filepath.split_list(v)
 		defer delete(r)
 		tc.expect(t, len(r) == 1, fmt.tprintf("%s(%s) len(r) %d != 1", #procedure, v, len(r)))
 		if len(r) == 1 {

+ 16 - 20
tests/core/reflect/test_core_reflect.odin

@@ -19,8 +19,6 @@ main :: proc() {
 
 @test
 test_as_u64 :: proc(t: ^testing.T) {
-	using reflect
-
 	{
 		/* i8 */
 		Datum :: struct { i: int, v: i8, e: u64 }
@@ -32,7 +30,7 @@ test_as_u64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_u64(d.v)
+			r, valid := reflect.as_u64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i8 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i8 %v) -> %v (0x%X) != %v (0x%X)\n",
 												i, #procedure, d.v, r, r, d.e, d.e))
@@ -49,7 +47,7 @@ test_as_u64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_u64(d.v)
+			r, valid := reflect.as_u64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i16 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i16 %v) -> %v (0x%X) != %v (0x%X)\n",
 												i, #procedure, d.v, r, r, d.e, d.e))
@@ -66,7 +64,7 @@ test_as_u64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_u64(d.v)
+			r, valid := reflect.as_u64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i32 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i32 %v) -> %v (0x%X) != %v (0x%X)\n",
 												i, #procedure, d.v, r, r, d.e, d.e))
@@ -83,7 +81,7 @@ test_as_u64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_u64(d.v)
+			r, valid := reflect.as_u64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i64 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i64 %v) -> %v (0x%X) != %v (0x%X)\n",
 												i, #procedure, d.v, r, r, d.e, d.e))
@@ -103,7 +101,7 @@ test_as_u64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_u64(d.v)
+			r, valid := reflect.as_u64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i128 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i128 %v) -> %v (0x%X) != %v (0x%X)\n",
 												i, #procedure, d.v, r, r, d.e, d.e))
@@ -119,7 +117,7 @@ test_as_u64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_u64(d.v)
+			r, valid := reflect.as_u64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(f16 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f16 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
 		}
@@ -133,7 +131,7 @@ test_as_u64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_u64(d.v)
+			r, valid := reflect.as_u64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(f32 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f32 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
 		}
@@ -147,7 +145,7 @@ test_as_u64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_u64(d.v)
+			r, valid := reflect.as_u64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(f64 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f64 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
 		}
@@ -156,8 +154,6 @@ test_as_u64 :: proc(t: ^testing.T) {
 
 @test
 test_as_f64 :: proc(t: ^testing.T) {
-	using reflect
-
 	{
 		/* i8 */
 		Datum :: struct { i: int, v: i8, e: f64 }
@@ -169,7 +165,7 @@ test_as_f64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_f64(d.v)
+			r, valid := reflect.as_f64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i8 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i8 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
 		}
@@ -185,7 +181,7 @@ test_as_f64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_f64(d.v)
+			r, valid := reflect.as_f64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i16 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i16 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
 		}
@@ -201,7 +197,7 @@ test_as_f64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_f64(d.v)
+			r, valid := reflect.as_f64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i32 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i32 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
 		}
@@ -217,7 +213,7 @@ test_as_f64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_f64(d.v)
+			r, valid := reflect.as_f64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i64 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i64 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
 		}
@@ -234,7 +230,7 @@ test_as_f64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_f64(d.v)
+			r, valid := reflect.as_f64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(i128 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i128 %v) -> %v (%H) != %v (%H)\n",
 												i, #procedure, d.v, r, r, d.e, d.e))
@@ -250,7 +246,7 @@ test_as_f64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_f64(d.v)
+			r, valid := reflect.as_f64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(f16 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f16 %v (%H)) -> %v (%H) != %v (%H)\n",
 												i, #procedure, d.v, d.v, r, r, d.e, d.e))
@@ -265,7 +261,7 @@ test_as_f64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_f64(d.v)
+			r, valid := reflect.as_f64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(f32 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f32 %v (%H)) -> %v (%H) != %v (%H)\n",
 												i, #procedure, d.v, d.v, r, r, d.e, d.e))
@@ -280,7 +276,7 @@ test_as_f64 :: proc(t: ^testing.T) {
 
 		for d, i in data {
 			assert(i == d.i)
-			r, valid := as_f64(d.v)
+			r, valid := reflect.as_f64(d.v)
 			tc.expect(t, valid, fmt.tprintf("i:%d %s(f64 %v) !valid\n", i, #procedure, d.v))
 			tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f64 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
 		}

+ 2 - 6
tests/core/text/i18n/test_core_text_i18n.odin

@@ -118,8 +118,6 @@ TESTS := []Test_Suite{
 
 @test
 tests :: proc(t: ^testing.T) {
-	using fmt
-
 	cat: ^i18n.Translation
 	err: i18n.Error
 
@@ -142,8 +140,6 @@ tests :: proc(t: ^testing.T) {
 }
 
 main :: proc() {
-	using fmt
-
 	track: mem.Tracking_Allocator
 	mem.tracking_allocator_init(&track, context.allocator)
 	context.allocator = mem.tracking_allocator(&track)
@@ -157,9 +153,9 @@ main :: proc() {
 	}
 
 	if len(track.allocation_map) > 0 {
-		println()
+		fmt.println()
 		for _, v in track.allocation_map {
-			printf("%v Leaked %v bytes.\n", v.location, v.size)
+			fmt.printf("%v Leaked %v bytes.\n", v.location, v.size)
 		}
 	}
 }

+ 1 - 0
tests/issues/run.bat

@@ -15,6 +15,7 @@ set COMMON=-collection:tests=..\..
 ..\..\..\odin test ..\test_issue_2466.odin %COMMON% -file || exit /b
 ..\..\..\odin test ..\test_issue_2615.odin %COMMON% -file || exit /b
 ..\..\..\odin test ..\test_issue_2637.odin %COMMON% -file || exit /b
+..\..\..\odin test ..\test_issue_2666.odin %COMMON% -file || exit /b
 
 @echo off
 

+ 1 - 0
tests/issues/run.sh

@@ -18,6 +18,7 @@ $ODIN build ../test_issue_2113.odin $COMMON -file -debug
 $ODIN test ../test_issue_2466.odin $COMMON -file
 $ODIN test ../test_issue_2615.odin $COMMON -file
 $ODIN test ../test_issue_2637.odin $COMMON -file
+$ODIN test ../test_issue_2666.odin $COMMON -file
 if [[ $($ODIN build ../test_issue_2395.odin $COMMON -file 2>&1 >/dev/null | grep -c "$NO_NIL_ERR") -eq 2 ]] ; then
 	echo "SUCCESSFUL 1/1"
 else

+ 26 - 0
tests/issues/test_issue_2666.odin

@@ -0,0 +1,26 @@
+// Tests issue https://github.com/odin-lang/Odin/issues/2666
+// @(disabled=<boolean>) does not work with polymorphic procs
+package test_issues
+
+import "core:testing"
+
+@(test)
+test_disabled_parapoly :: proc(t: ^testing.T) {
+	disabled_parapoly(t, 1)
+	disabled_parapoly_constant(t, 1)
+}
+
+@(private="file")
+@(disabled = true)
+disabled_parapoly :: proc(t: ^testing.T, num: $T) {
+	testing.error(t, "disabled_parapoly should be disabled")
+}
+
+@(private="file")
+DISABLE :: true
+
+@(disabled = DISABLE)
+@(private = "file")
+disabled_parapoly_constant :: proc(t: ^testing.T, num: $T) {
+	testing.error(t, "disabled_parapoly_constant should be disabled")
+}

+ 1 - 0
vendor/fontstash/fontstash.odin

@@ -1,4 +1,5 @@
 //+build windows, linux, darwin
+//+vet !using-param
 package fontstash
 
 import "core:runtime"